//#include <unordered_map>
#include <map>
#include <iostream>
#include <algorithm>
#include <climits>
#include <math.h>
#include <mpi.h>
#include <vector>

#include <include/dilatation.hpp>
#include <include/edge.hpp>
#include <include/vertex.hpp>
#include <include/pair_of_ints.hpp>
#include <include/graph_utils.hpp>
#include <include/permutation_utils.hpp>
#include <include/distributions.hpp>
#include <include/distribution_utils.hpp>

//#define DEBUG_STD_COUT
#define CHECK_MSG_AMOUNT  100

#define MSG_WORK_REQUEST 1000
#define MSG_WORK_SENT    1001
#define MSG_WORK_NOWORK  1002
#define MSG_TOKEN        1003
#define MSG_FINISH       1004

typedef std::vector<Edge*>::iterator iter_vec_edge;
//typedef std::unordered_map<PairOfInts,bool>::const_iterator citer_unord_map;
typedef std::map<PairOfInts,bool>::const_iterator citer_unord_map;

// Default constructor. Initializes both pointer members to NULL so that a
// default-constructed Dilatation can be safely destroyed (the destructor
// deletes edgesIds, which would be undefined behavior on an indeterminate
// pointer; `delete NULL` is a no-op).
Dilatation::Dilatation() {
    this->graph = NULL;
    this->edgesIds = NULL;
}

// Constructs a Dilatation solver for the given graph.
// The graph is only borrowed; the edge-id map is owned by this object and
// released in the destructor.
Dilatation::Dilatation(Graph * graph)
    : graph(graph),
      edgesIds(new std::map<PairOfInts,bool>()) {
}

void Dilatation::buildHashmap() {
    std::vector<Edge*> * listOfEdges = this->graph->getListOfEdges();
    PairOfInts pairOfInts;

    for(iter_vec_edge iter_edge = listOfEdges->begin(); iter_edge != listOfEdges->end(); ++iter_edge) {
        int firstVertexId = (*iter_edge)->getVertices()->first->getId();
        int secondVertexId = (*iter_edge)->getVertices()->second->getId();
        pairOfInts.first = firstVertexId;
        pairOfInts.second = secondVertexId;
        (*this->edgesIds)[pairOfInts]=true;

        pairOfInts.first = secondVertexId;
        pairOfInts.second = firstVertexId;
        (*this->edgesIds)[pairOfInts]=true;
    }
}

// Distributed search for the minimum-dilatation permutation. Each MPI process
// enumerates its assigned slice of permutations (described by `distributions`)
// and the processes exchange work requests, work assignments and termination
// notices via the MSG_* tags. Returns the smallest dilatation this process
// observed.
int Dilatation::getDilatation(Distributions distributions) {
      int myRank;
      int countOfProcesses;

      /* find out process rank */
      MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

      /* find out number of processes */
      MPI_Comm_size(MPI_COMM_WORLD, &countOfProcesses);
    
    int flag;
    MPI_Status status;
    int LENGTH = 100;   // element count used for most MPI transfers below
    int tag=1;
    int position=1;
    int source = 0;
    int message[LENGTH];              // control message; slots 50..52 carry dilatation info
    int requests[countOfProcesses];   // requests[i] == 1 means CPU i is asking for work
    int distribs[countOfProcesses*2+2]; // per-CPU (initial perm number, perm count) pairs
    // NOTE(review): requests and distribs are received below with count LENGTH (100);
    // when countOfProcesses (resp. countOfProcesses*2+2) is smaller than 100 those
    // receives write past the arrays — confirm and size the transfers correctly.
    int DIL_INDEX = 50;                      // message slot: dilatation value
    int DIL_BOUND_FOUND_INDEX = 51;          // message slot: 1 when the lower bound was reached
    int CPU_THAT_HAS_FOUND_BOUND_INDEX = 52; // message slot: rank that found the bound
    MPI_Request request;
    MPI_Request request_distr;
    
    int graphsDiameter = graph->getGraphsDiameter();
    int numberOfVertices = graph->getNumberOfVertices();
    int lowerBound = graph->getLowerBound();

    // Start from the first permutation assigned to this process.
    // NOTE(review): `permutation` looks heap-allocated by getNthPermutation and is
    // never freed in this function — confirm ownership / fix the leak.
    int * permutation = PermutationUtils::getNthPermutation(numberOfVertices, distributions.initialPermNumber);
    int graphDilatation = INT_MAX;   // best (smallest) dilatation found so far

    int counterOfProcessedPerms = 0;
    for(int i = 0; i < countOfProcesses; ++i) {
       requests[i]=0;
    }
    
    bool doNotSendRequest = false;   // set right after new work was received
    bool finished = false;
    while(!finished && distributions.countOfPemrsToProcess > 0) {
	bool showSendinFinishToOutput = true;
	
	do {
	      // Process one permutation from our slice, if any remain.
	      if(counterOfProcessedPerms < distributions.countOfPemrsToProcess && distributions.countOfPemrsToProcess > 0) {
		graphDilatation = improveDilatation(permutation, graphDilatation, numberOfVertices);
		counterOfProcessedPerms++;
	      }
	      if(lowerBoundReached(lowerBound, graphDilatation)) {
		    //broadcast send to all that lower bound has been found
		    message[DIL_BOUND_FOUND_INDEX] = 1;
		    message[DIL_INDEX] = graphDilatation;
		    message[CPU_THAT_HAS_FOUND_BOUND_INDEX] = myRank;
		    // NOTE(review): MPI_Bcast is a collective operation; the other ranks do
		    // not post a matching Bcast with this root, so this call is suspect —
		    // the point-to-point MSG_FINISH sends below appear to be the real path.
		    MPI_Bcast(&message, LENGTH, MPI_INT, myRank, MPI_COMM_WORLD);
		    for(int i = 0; i < countOfProcesses; ++i) {
			  if(showSendinFinishToOutput) {
			      printf("Sending a MPI_Isend - MSG_FINISH from cpu id: %d to cpu id: %d\n", myRank, i);
			  }
			  MPI_Isend(&message, LENGTH, MPI_INT, i, MSG_FINISH, MPI_COMM_WORLD,&request);
		    }
		    showSendinFinishToOutput = false;
		}
	    
	    // Poll for incoming messages once every CHECK_MSG_AMOUNT permutations.
	    if ((counterOfProcessedPerms % CHECK_MSG_AMOUNT)==0)
	    {
		MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &flag, &status);
		if (flag)
		{
		  // NOTE(review): three nonblocking receives are posted for a single probed
		  // message; only one of them can match it, the others may consume unrelated
		  // traffic later, and `request` is overwritten without MPI_Wait/MPI_Test
		  // (the buffers may not yet be filled when the switch below reads them).
		  MPI_Irecv(&message, LENGTH, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &request);
		  MPI_Irecv(&requests, LENGTH, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &request);
		  MPI_Irecv(&distribs, LENGTH, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &request_distr);
		  switch (status.MPI_TAG)
		  {
		    // Another CPU asked for work: collect every CPU whose requests[] flag is
		    // set and split our remaining permutations among them (and ourselves).
		    case MSG_WORK_REQUEST : { 
					  #ifdef DEBUG_STD_COUT
					    printf("msg_work_request prijal cpu s id: %d, requesty: ", myRank);

					    for(int i = 0; i < countOfProcesses; ++i) {
					    printf("%d, ", requests[i]);
					  }
					  printf("\n");
					  #endif
					  
					  // If we are asking for work ourselves, we cannot serve the request.
					  if(requests[myRank] == 1) {
					    #ifdef DEBUG_STD_COUT
					    printf("Ukoncenie spracovania msg_work_request - requests[myRank] == 1 - by cpu id: %d\n", myRank);
					    #endif
					    break;
					  }
					  #ifdef DEBUG_STD_COUT
					  printf("\ncpu id: %d prijalo spravu MSG_WORK_REQUEST a procesory, ktore chcu pracu, maju ID: ", myRank);
					  #endif
					  std::vector<int> cpuIdThatWantToWork;
					  for(int i = 0; i < countOfProcesses; ++i) {
						if(requests[i] == 1) {
						    #ifdef DEBUG_STD_COUT
						    printf("%d ,", i);
						    #endif
						    cpuIdThatWantToWork.push_back(i);
						    requests[i] = 0;
						    // also send out the updated requests array
						    // NOTE(review): collective Bcast used as point-to-point again (see above);
						    // also note the inner loop shadows the outer loop variable i.
						    MPI_Bcast(&requests, LENGTH, MPI_INT, myRank, MPI_COMM_WORLD);
						    for(int i = 0; i < countOfProcesses; ++i) {
							  #ifdef DEBUG_STD_COUT
							  printf("Sending updated a MPI_Isend - MSG_WORK_REQUEST from cpu id: %d to cpu id: %d\n", myRank, i);
							  #endif
							  MPI_Isend(&requests, countOfProcesses, MPI_INT, i, MSG_WORK_REQUEST, MPI_COMM_WORLD,&request);
						    }
						}
					  }
					  #ifdef DEBUG_STD_COUT
					  printf(" .\n");
					  #endif

					  // Everyone wants work: nobody has any, tell all CPUs there is no work.
					  if(cpuIdThatWantToWork.size() == countOfProcesses) {
					      for(int i = 0; i < countOfProcesses; ++i) {
						  printf("Sending a MPI_Isend - MSG_WORK_NOWORK from cpu id: %d to cpu id: %d\n", myRank, i);
						  MPI_Isend(&requests, countOfProcesses, MPI_INT, i, MSG_WORK_NOWORK, MPI_COMM_WORLD, &request);
					      }
					      #ifdef DEBUG_STD_COUT
					      printf("Ukoncenie spracovania msg_work_request - cpuIdThatWantToWork.size() == countOfProcesses - by cpu id: %d\n", myRank);
					      #endif
					      break;
					  }
					  if(cpuIdThatWantToWork.size() == 0) {
					    #ifdef DEBUG_STD_COUT
					    printf("Ukoncenie spracovania msg_work_request - cpuIdThatWantToWork.size() == 0 - by cpu id: %d\n", myRank);  
					    #endif
					    break;
					  }
					  
					  // Include ourselves in the redistribution and compute the new slices.
					  cpuIdThatWantToWork.push_back(myRank);
					  std::vector<Distributions> distributionsVector;
					  #ifdef DEBUG_STD_COUT
					  printf("CPU id, ktory bude pocitat distribucie: %d, dist.initPerm: %d, dist.countOfPemrsToProcess: %d, couterOfProcessedPerms: %d\n", 
						 myRank, distributions.initialPermNumber, distributions.countOfPemrsToProcess, counterOfProcessedPerms);
					  #endif
					  
					  distributionsVector = DistributionUtils::computeDistributions(cpuIdThatWantToWork,  
											  distributions.initialPermNumber + counterOfProcessedPerms,
											  0,
											  distributions.countOfPemrsToProcess - counterOfProcessedPerms, 
											  numberOfVertices);

					  // Pack each CPU's (initial permutation, count) into distribs[].
					  bool noWork = false;
					  typedef std::vector<Distributions>::iterator iter_vec_distr;
					  for(iter_vec_distr iter = distributionsVector.begin(); iter != distributionsVector.end(); ++iter) {
					      //std::cout << "CPU ID: " << (*iter).cpuId << " Init perm number: " << (*iter).initialPermNumber << " Perms to process: " << (*iter).countOfPemrsToProcess << std::endl;
					      for(int i = 0; i < cpuIdThatWantToWork.size(); ++i) {
						  if(i == (*iter).cpuId) {
						      distribs[2*i] = (*iter).initialPermNumber;
						      distribs[2*i+1] = (*iter).countOfPemrsToProcess;
						      if((*iter).countOfPemrsToProcess == 0) {
							noWork = true;
							finished = true;
						      }
						  }
					      }
					  }
					  
					  /*if(noWork) {
					      for(int i = 0; i < cpuIdThatWantToWork.size(); ++i) {
						printf("Sending a MPI_Isend - MSG_FINISH from cpu id: %d to cpu id: %d 'cos of no work and to this cpu\n", myRank, i);
						MPI_Isend(&requests, LENGTH, MPI_INT, i, MSG_FINISH, MPI_COMM_WORLD, &request);
					    }
					    MPI_Isend(&requests, LENGTH, MPI_INT, myRank, MSG_FINISH, MPI_COMM_WORLD, &request);
					  }
					  else {*/
					    // Hand the new slices to every requesting CPU.
					    MPI_Bcast(&distribs, LENGTH, MPI_INT, myRank, MPI_COMM_WORLD);
					    for(int i = 0; i < cpuIdThatWantToWork.size(); ++i) {
						#ifdef DEBUG_STD_COUT
						printf("Sending a MPI_Isend - MSG_WORK_SENT from cpu id: %d to cpu id: %d\n", myRank, i);
						#endif
						MPI_Isend(&distribs, LENGTH, MPI_INT, i, MSG_WORK_SENT, MPI_COMM_WORLD, &request_distr);
					    }
					  //}
					  
					  //std::cout << "CPU  with ID: " << myRank << ", DILATATION: " << dilatation->getDilatation(distr) << std::endl;
					cpuIdThatWantToWork.clear();
					  			      
					    break;
		    }
		    // A new slice of permutations arrived: adopt it and restart the counter.
		    case MSG_WORK_SENT : {
					  #ifdef DEBUG_STD_COUT
					  printf("cpu id: %d prijalo spravu MSG_WORK_SENT\n",myRank);
					  #endif
					  //permutation = PermutationUtils::getNthPermutation(numberOfVertices, distribs[2*myRank]);
					  distributions.countOfPemrsToProcess = distribs[2*myRank +1];
					  printf("from SENT statement: cpu id: %d, init perm num: %d, perms to count: %d\n", myRank, distribs[2*myRank], distribs[2*myRank+1]);
					  distribs[2*myRank] = 0;
					  distribs[2*myRank+1] = 0;
					  counterOfProcessedPerms = 0;
					  doNotSendRequest=true;
					  MPI_Bcast(&requests, LENGTH, MPI_INT, myRank, MPI_COMM_WORLD);
					  // not handled here — only after the do-loop:
					  // the split work stack arrived; receive it,
					  // deserialize it and start the computation
					  break;
		    }
		    // No work is left anywhere: terminate.
		    case MSG_WORK_NOWORK : printf("cpu id: %d prijalo spravu MSG_WORK_NOWORK od: %d\n",myRank,source);
		    printf("CPU with ID: %d is finishing.\n", myRank);
		   // MPI_Finalize();
                           //exit (0);
					  finished = true;
					  break;
		    // Somebody reached the lower bound (or we were told to stop): terminate.
		    case MSG_FINISH : printf ("I have got a finish message, my rank is: %d\n", myRank);
				      if(message[DIL_BOUND_FOUND_INDEX] == 1) {
					  printf("nasiel sa low bound a DILATATION: %d\n", message[DIL_INDEX]);
				      }
				      printf("CPU with ID: %d is finishing.\n", myRank);
				     // MPI_Finalize();
				      //exit (0);
				      finished = true;
				      break;
		    default : printf("I have got an unknown message, my rank is: %d\n", myRank); break;
		  }
		}
	    }
	    //here should check a queue of requests for a job or check a message whether a lower bound has been found
	} while(std::next_permutation(permutation, permutation + numberOfVertices) 
	    && counterOfProcessedPerms != distributions.countOfPemrsToProcess
	       && !finished);
	
	//if some CPU will be here, it means the CPU has processed every assigned permutation
	//and it will send the result and a request for a job
	
	//cpu will send a request 
	if(!finished && !doNotSendRequest) {
	    MPI_Irecv(&requests, LENGTH, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &request);
	      #ifdef DEBUG_STD_COUT
	      printf("Setting requests[%d]=1\n", myRank);
	      #endif
	      requests[myRank]=1;
	      // NOTE(review): same collective-vs-point-to-point Bcast concern as above.
	      MPI_Bcast(&requests, LENGTH, MPI_INT, myRank, MPI_COMM_WORLD);
	      for(int i = 0; i < countOfProcesses; ++i) {
		  //if(i != myRank && requests[i] != 1) {
		    #ifdef DEBUG_STD_COUT
		    printf("Sending a MPI_Isend - MSG_WORK_REQUEST from cpu id: %d to cpu id: %d\n", myRank, i);
		    #endif
		    MPI_Isend(&requests, countOfProcesses, MPI_INT, i, MSG_WORK_REQUEST, MPI_COMM_WORLD,&request);
		  //}
	      }
	}
	
	
    } //while not finished
    
    printf("I am out! CPU with id: %d\n", myRank);
    
#ifdef DEBUG_STD_COUT
    writeInfoToCout(numberOfVertices, graphsDiameter, lowerBound);
#endif
    
    return graphDilatation;
}

// Prints basic statistics about the graph to standard output.
void Dilatation::writeInfoToCout(int numberOfVertices, int graphsDiameter, int lowerBound) {
    std::cout << "\nNumber of vertices:\t" << numberOfVertices
              << "\nGraph's diameter:\t" << graphsDiameter
              << "\n\nGraph's lower bound:\t" << lowerBound;
}

// Returns true when the current dilatation equals the theoretical lower
// bound, i.e. no better permutation can exist and the search may stop.
bool Dilatation::lowerBoundReached(int lowerBound, int graphDilatation) {
    bool reached = (graphDilatation == lowerBound);
    return reached;
}

int Dilatation::improveDilatation(int * permutation, int graphDilatation, int numberOfVertices) {
    int maxDilatation = 0;
    PairOfInts pairOfInts;
    citer_unord_map endOfMap = this->edgesIds->end();
#ifdef DEBUG_STD_COUT
    std::cout << "Now processing permutation: \t\t\t";
    for(int i = 0; i < numberOfVertices; ++i) {
        std::cout << permutation[i];
    }
#endif

    for(int i = 0; i < numberOfVertices - 1; ++i) {
        for(int j = i+1; j < numberOfVertices; ++j) {
            pairOfInts.first = permutation[i];
            pairOfInts.second = permutation[j];
            citer_unord_map citer_map = (this->edgesIds->find(pairOfInts));
            if(citer_map != endOfMap) {
                if(j - i > maxDilatation) {
                    maxDilatation = j - i;
                }
            }
        }
    }
#ifdef DEBUG_STD_COUT
    std::cout << "\n\tComputed permutation's dilatation: \t" << maxDilatation;
#endif

    if(graphDilatation >= maxDilatation) {
        graphDilatation = maxDilatation;
    }
#ifdef DEBUG_STD_COUT
    std::cout << "\n\tMin graph's dilatation: \t\t" << graphDilatation << std::endl;
#endif
    return graphDilatation;
}

// Builds the identity permutation 0, 1, ..., numberOfVertices-1.
// The returned array is allocated with new[]; ownership passes to the caller.
int * Dilatation::getFirstPermutation(int numberOfVertices) {
    int * permutation = new int[numberOfVertices];
    int position = 0;
    while(position < numberOfVertices) {
        permutation[position] = position;
        ++position;
    }
    return permutation;
}

// Releases the owned edge-id map. The graph pointer is intentionally not
// deleted here — presumably owned by the caller; verify against call sites.
Dilatation::~Dilatation() {
    delete this->edgesIds;
}
