#include <math.h>
#include <stdlib.h>
#include <unistd.h>

#include <iostream>
#include <vector>

#ifdef GMPI
#include <mpi.h>
#endif

#include "gsolverBBPv3NoWaitMaster.h"

/**
 * A BBP solver : starts with jobs with last due dates
 *
 * In this version 3 of the BBP, we start with the jobs reordered by increasing due date. But
 * unlike the brute-force exploration of version 2, this time we do not consider arbitrary batch
 * numbers. For each job, we choose to add it either to an already existing batch whose residual
 * capacity is large enough to hold the new job, or to a new batch.
 *
 * The new batch is scheduled before the other batches already created.
 *
 * WARNING: For this solution to be optimal, given that we start from a problem whose jobs are
 * sorted by increasing due date, it is also necessary that the storage cost of all jobs be
 * identical. Otherwise, the solution built this way is no longer guaranteed to be optimal.
 */
GSolverBBPv3NoWaitMaster::GSolverBBPv3NoWaitMaster(GSupplyLinkProblem& pb, int node_, int p_):GSolverBBPv3NoWait(pb,node_,p_) {
  // collectTimeStatisticsState = COLLECT_TIME_STATISTICS_STATE_START ;
  // Cap the master's queue of pending search nodes at 4 per MPI process.
  listSearchNodesMaxSize = nbrNodes * 4 ;
}



/**
 * Master-side initialization: builds the initial incumbent, broadcasts it to
 * the slaves, and computes the lower/upper bounds on the number of batches.
 */
void GSolverBBPv3NoWaitMaster::solveInit() {
  GSolverBBPv3NoWait::solveInit() ;

  // First feasible solution via First Fit Decreasing: gives the initial
  // upper bound used to prune the branch and bound.
  initBestSolutionWithFFD() ;

#ifdef GMPI
  // Broadcast the initial incumbent to the slaves.  The payload is the pair
  // {nbrBatch, customerHoldingCost}, the same format as every other message
  // sent with BESTEVAL_FROM_MASTER_TAG (see sendMPICommForBestSolution and
  // testForBestEvalMsgFromSlave).  The original code sent a single double
  // holding getEvaluation(), which did not match what the slaves decode.
  double besteval[2] ;
  besteval[0] = (double)bestSolutionPermuted.getNbrBatch() ;
  besteval[1] = bestSolutionPermuted.getEvaluationCustomer() ;
  for (int rank=1;rank<nbrNodes;++rank)
    MPI_Send(besteval, 2, MPI_DOUBLE, rank, BESTEVAL_FROM_MASTER_TAG, MPI_COMM_WORLD);
#endif

  // Lower bound on the number of batches: total job volume divided by the
  // transporter capacity, rounded up.
  double totalVolume = 0 ;
  for (int i=0;i<this->problem.getNP();++i)
    totalVolume+=this->problem.getJobSize(i) ;
  minBatch = int(totalVolume/this->problem.getTransporter(0).getCapacity()) ;
  if (minBatch*this->problem.getTransporter(0).getCapacity()<totalVolume) minBatch++ ;
  // Upper bound: one job per batch.
  maxBatch = this->problem.getNP() ;

  // The search starts from an empty partial solution (no batch created yet).
  this->currentSolutionPermuted.setNbrBatch(0) ;
}




#ifdef GMPI
/**
 *
 */
void GSolverBBPv3NoWaitMaster::testForBestEvalMsgFromSlave() {
  int flag ;

  cpuMPIComm.start() ;

  // Check for a new besteval
  MPI_Test(&request_bestevalfromslave, &flag, &status);
  while (flag) {
    bool better = false ;
    if ((int)bestEvalMPI[0]<bestSolutionPermuted.getNbrBatch()) 
      better = true ;
    else {
      if ((int)bestEvalMPI[0] == bestSolutionPermuted.getNbrBatch())  {
	if (bestEvalMPI[1] < bestSolutionPermuted.getEvaluationCustomer() ) 
	  better = true ;
      }
    }
    if (better) {
      bestSolutionPermuted.setNbrBatch((int)bestEvalMPI[0]) ;
      bestSolutionPermuted.setEvaluationCustomer(bestEvalMPI[1]) ;

      cout << "Master updating bestEvalMPI:nbrBatch=" << (int)bestEvalMPI[0] << " - customerHoldingCost=" << bestEvalMPI[1] << " !!!!!!!!!!!!!!" << endl ;

      // send New Best besteval To Slaves
      for (int rank=1;rank<nbrNodes;++rank) 
	MPI_Isend(&bestEvalMPI, 2, MPI_DOUBLE, rank, BESTEVAL_FROM_MASTER_TAG, MPI_COMM_WORLD, &request_besteval[rank]);

    }

    // on se remet en attente 
    MPI_Irecv(&bestEvalMPI, 2, MPI_DOUBLE, MPI_ANY_SOURCE, BESTEVAL_FROM_SLAVE_TAG, MPI_COMM_WORLD, &request_bestevalfromslave);
    MPI_Test(&request_bestevalfromslave, &flag, &status);
  }

  cpuMPIComm.stop() ;
}
#endif




#ifdef GMPI
/**
 * Drain all pending ENDWORK messages from the slaves.
 *
 * Each ENDWORK message means the sending slave has finished exploring its
 * sub-tree: the slave is marked as waiting, its chrono is stopped, and the
 * measured exploration time feeds the statistics of the corresponding
 * search-tree depth level.  Once enough samples show that sub-trees at the
 * current dispatching depth are explored faster than the average wall time
 * per node, depthLevelForDispatching is decreased so each slave receives a
 * bigger chunk of the search tree next time.
 *
 * @return 1 if the work queue is empty AND all slaves are waiting
 *         (i.e. the whole distributed search is finished), 0 otherwise.
 */
int GSolverBBPv3NoWaitMaster::testForEndWorkMsgFromSlave() {
  int flag, depthlevel=-1 ;
  double timesec=-1.0 ;

  cpuMPIComm.start() ;

  // Check for a pending ENDWORK msg from a slave
  MPI_Test(&request_endworkfromslave, &flag, &status);

  while (flag) {
    // The sender is now idle and available for new work.
    int slaverank = status.MPI_SOURCE ;
    slaveIsWaiting[slaverank] = 1 ;
    nbrWaitingSlaves++ ;

#ifdef GMPIDEBUG
    cout << "Master receives a ENDWORK msg from slave node" << slaverank << " ; nbrWaitingSlaves=" << nbrWaitingSlaves << endl ;
#endif

    // Stop chrono for this slave node and record its exploration time in the
    // statistics of the depth level that was dispatched to it.
    slaveChrono[slaverank].stop() ;
    depthlevel = slaveChrono[slaverank].depthLevel ;
    timesec = slaveChrono[slaverank].getTotalTimeSec() ;
    searchTreeStat[depthlevel].addStat(timesec) ;
    timesec = searchTreeStat[depthlevel].getMeanTime() ;

#ifdef GDEBUG
    // cout << ">> average running time for level=" << depthlevel << " : " << searchTreeStat[depthlevel].getMeanTime() << "sec." << endl ;
#endif

    // Only adapt when there are enough samples, the chrono for this slave is
    // trustworthy, and the sample belongs to the current dispatching depth.
    if (searchTreeStat[depthlevel].getNbrStats()>10 && slaveChrono[slaverank].isActivated() && depthLevelForDispatching==depthlevel) {
#ifdef GADJUST_PERIOD_MPI_TEST
      // Adapt the alarm duration to 1/10th of the mean exploration time,
      // e.g. poll every second for a mean exploration time of 10 seconds.
      if (searchTreeStat[depthlevel].getNbrStats()>MIN_NBR_STAT && timesec>RATIO_MEANTIME_PERIOD_MPI_TEST*alarmMPITestMaster.getAlarmDuration()) {
	double newduration = timesec/RATIO_MEANTIME_PERIOD_MPI_TEST ;
	if (newduration<1.0) newduration = 0.0 ;
	if (newduration>MAX_PERIOD_MPI_TEST) newduration = MAX_PERIOD_MPI_TEST ;
	alarmMPITestMaster.setAlarmDuration(newduration) ;
      }
#endif

      double threshold = cpuTotal.getRunningTime()/nbrNodes ;
      // MYMODIF: try with the MAX instead of the MEAN
      if (searchTreeStat[depthlevel].getNbrStats()>10  && depthLevelForDispatching==depthlevel && timesec<threshold && depthLevelForDispatching>MIN_LEVEL_FOR_DISPATCHING) { // Increase the work for the slaves
	// minJobForDispatching && maxJobForDispatching 
	depthLevelForDispatching-- ;

	// Old timings no longer describe the new (bigger) work units:
	// deactivate every chrono so stale samples are not trusted.
	for (int s=0;s<MAX_SLAVES;++s) { slaveChrono[s].deactivate() ; }

#ifdef GDEBUG
	cout << "testForEndWorkMsgFromSlave() : NEW depthLevelForDispatching=" << depthLevelForDispatching << " (meanTime=" << timesec << "s for depthLevel=" << (depthLevelForDispatching+1) << ")" << endl ;
#endif
      }
    }

    assert (nbrWaitingSlaves<=maxWaitingSlaves) ;
    
    // Re-arm the receive, and keep looping while messages are already waiting.
    MPI_Irecv(0, 0, MPI_INT, MPI_ANY_SOURCE, ENDWORK_TAG, MPI_COMM_WORLD, &request_endworkfromslave);
    MPI_Test(&request_endworkfromslave, &flag, &status);

  }

  cpuMPIComm.stop() ;

  return (listSearchNodes.size()==0 && nbrWaitingSlaves==maxWaitingSlaves) ;
}
#endif





/**
 * Main solving loop of the master node.
 *
 * Single-process build: a plain depth-first search from the root.
 * With GMPI: the master explores the tree down to depthLevelForDispatching
 * (pushWork queues the sub-trees in listSearchNodes), then alternates between
 * handing queued nodes to the slaves, exploring queued nodes itself, and
 * polling for best-eval / end-of-work messages, until the queue is empty and
 * every slave is idle.  Finally each slave is told to exit with DIE_TAG.
 */
void GSolverBBPv3NoWaitMaster::solveMain() {

  cpuTotal.start() ;

  bool workIsFinished  = false ;

#ifdef GMPI
  // Initially every slave is idle and waiting for work.
  maxWaitingSlaves = nbrNodes - 1 ;
  nbrWaitingSlaves = maxWaitingSlaves ;
  for (int s=1;s<nbrNodes;++s) slaveIsWaiting[s] = 1 ;
#endif

  // Root of the search tree: no batch yet, jobs are placed starting from the
  // last one (highest due date).
  searchnode searchtreeroot ;
  searchtreeroot.nbrbatch = 0 ;
  searchtreeroot.job = this->problem.getNP()-1 ;
  searchtreeroot.sol = this->currentSolutionPermuted ;

#ifdef GMPI
  // Post the persistent receives.  A slave's best-eval message carries the
  // pair {nbrBatch, customerHoldingCost} (2 doubles): the count here must
  // match the re-arm in testForBestEvalMsgFromSlave().  The original code
  // posted a count of 1, so the first incoming 2-double message would
  // overflow the posted receive (MPI truncation error).
  MPI_Irecv(&bestEvalMPI, 2, MPI_DOUBLE, MPI_ANY_SOURCE, BESTEVAL_FROM_SLAVE_TAG, MPI_COMM_WORLD, &request_bestevalfromslave);

  /* Receive ENDWORK from slave */
  MPI_Irecv(0, 0, MPI_INT, MPI_ANY_SOURCE, ENDWORK_TAG, MPI_COMM_WORLD, &request_endworkfromslave);
#endif

  // Single process and multi process
  cpuComputing.start() ;
  alarmMPITestMaster.rearm() ;

  depthFirstSearch(searchtreeroot) ;

  cpuComputing.stop() ;
#ifdef GDEBUG
  cout << "###################################################################################" << endl ;
  cout << "Master node"<<node<<" stopping computation at " << cpuComputing.getStopTimeMs() << "s"  <<endl ;
  cout << "###################################################################################" << endl ;
#endif

#ifdef GMPI
  while (! workIsFinished) {
    dispatchWork() ;

    // The master also takes its share of the queued work.
    if (listSearchNodes.size()>0) {
      searchnode treenodechild = listSearchNodes.front() ;
      listSearchNodes.pop_front() ;
      depthFirstSearch(treenodechild) ;
    }

    testForBestEvalMsgFromSlave() ;

    workIsFinished = testForEndWorkMsgFromSlave() ;
  }

  // Tell all the slaves to exit by sending an empty message with the DIE_TAG.
  cpuMPIComm.start() ;
  for (int rank=1;rank<nbrNodes;++rank) {
    MPI_Send(0, 0, MPI_INT, rank, DIE_TAG, MPI_COMM_WORLD);
  }
  cpuMPIComm.stop() ;
#endif

  cpuTotal.stop() ;

}




#ifdef GMPI
/** #############################################################################
 *
 *  Queue a search-tree node for later dispatching.
 *
 *  A node is accepted only when it sits exactly at the current dispatching
 *  depth AND the work queue still has room.
 *
 *  @return WORK_DISPATCHED if the node was queued, WORK_NOT_DISPATCHED otherwise.
 *
 *  #############################################################################
 */
int GSolverBBPv3NoWaitMaster::pushWork(searchnode& treenodechild) {
  // Depth of the child node in the search tree (root jobs counted downwards).
  const int childDepth = problem.getNP() - treenodechild.job ;

  // Reject nodes that are not at the dispatching depth.
  if (childDepth != depthLevelForDispatching)
    return WORK_NOT_DISPATCHED ;

  // Reject when the queue is already full.
  if (!(listSearchNodes.size() < listSearchNodesMaxSize))
    return WORK_NOT_DISPATCHED ;

  listSearchNodes.push_back(treenodechild) ;
  return WORK_DISPATCHED ;
}


/** #############################################################################
 *
 *  Dispatch queued search nodes to free slaves until no slave is free or the
 *  work queue is empty.
 *
 *  Each dispatched node is serialized in three messages:
 *    WORK1_TAG : 2 ints     {nbrbatch, job}
 *    WORK2_TAG : doubles    tabBatchRemainingCapacity + incumbent evaluation
 *    WORK3_TAG : ints       batch indice of every already-placed job
 *
 *  @return WORK_DISPATCHED if at least one node was sent, WORK_NOT_DISPATCHED otherwise.
 *
 *  #############################################################################
 */
int GSolverBBPv3NoWaitMaster::dispatchWork() {
  int returnvalue = WORK_NOT_DISPATCHED ;

  cpuMPIComm.start() ;

  while (nbrWaitingSlaves>0 && listSearchNodes.size()>0) {
    searchnode treenodechild = listSearchNodes.front() ;

    // Find a free slave.  The bound check comes FIRST: the original code
    // evaluated slaveIsWaiting[rank] before testing rank<nbrNodes, reading
    // one slot past the last valid rank on the final iteration.
    int rank = 1 ;
    while (rank<nbrNodes && !slaveIsWaiting[rank]) rank++ ;

    if (rank<nbrNodes) {
      listSearchNodes.pop_front() ;

      nbrWaitingSlaves-- ;
      slaveIsWaiting[rank] = 0 ;

      // 2 INT : nbrbatch + job
      buffer[0] = treenodechild.nbrbatch ;
      buffer[1] = treenodechild.job ;

      MPI_Send(buffer,2,MPI_INT,rank,WORK1_TAG,MPI_COMM_WORLD) ;

      // DOUBLE ARRAY : tabBatchRemainingCapacity + incumbent evaluation.
      // std::vector replaces the original variable-length array (a
      // non-standard C++ extension) and zero-initializes every element.
      std::vector<double> capbuf(treenodechild.nbrbatch+2, 0.0) ;
      for (int i=0;i<treenodechild.nbrbatch;++i)
	capbuf[i] = treenodechild.tabBatchRemainingCapacity[i] ;
      capbuf[treenodechild.nbrbatch] = (double)bestSolutionPermuted.getNbrBatch() ;
      capbuf[treenodechild.nbrbatch+1] = bestSolutionPermuted.getEvaluationCustomer() ;

      // NOTE(review): only nbrbatch+1 doubles are sent, so the customer
      // holding cost stored in capbuf[nbrbatch+1] never reaches the slave.
      // The count is kept unchanged to stay wire-compatible with the
      // slave's posted receive -- confirm against the slave code whether
      // it should be nbrbatch+2.
      MPI_Send(capbuf.data(),treenodechild.nbrbatch+1,MPI_DOUBLE,rank,WORK2_TAG,MPI_COMM_WORLD) ;

      // INT ARRAY : indice of batch for each already-placed job.
      // NOTE(review): the loop below fills only size3-1 slots; the vector
      // zero-initializes the last slot so the payload is at least
      // deterministic (the original VLA sent one uninitialized int).
      int size3 = problem.getNP()-treenodechild.job ;
      std::vector<int> batchbuf(size3, 0) ;
      for (int i=treenodechild.job+1;i<problem.getNP();++i) {
	batchbuf[i-treenodechild.job-1] = (treenodechild.sol).getBatchIndice(i) ;
      }

      MPI_Send(batchbuf.data(),size3,MPI_INT,rank,WORK3_TAG,MPI_COMM_WORLD) ;

      // Start chrono for this slave node and memorize the corresponding
      // depth level of the dispatched searchtree node.
      slaveChrono[rank].depthLevel = problem.getNP() - treenodechild.job ;
      slaveChrono[rank].reset() ;
      slaveChrono[rank].start() ;

      returnvalue = WORK_DISPATCHED ;
    }
    
  }

  cpuMPIComm.stop() ;

  return returnvalue ;
}



/**
 * Periodic MPI housekeeping driven by the master's polling alarm: when the
 * alarm fires, poll the slaves for messages and hand out queued work.
 */
void GSolverBBPv3NoWaitMaster::testForMPIComm() {
  // Nothing to do until the polling alarm expires.
  if (!alarmMPITestMaster.testAndRearmAlarm()) return ;

  testForBestEvalMsgFromSlave() ;   // incoming improved bounds
  testForEndWorkMsgFromSlave() ;    // slaves reporting they are idle
  dispatchWork() ;                  // refill idle slaves with queued nodes
}


/**
 * 
 */
void GSolverBBPv3NoWaitMaster::sendMPICommForBestSolution(double nbrbatch, double customerholdingcost) {
  // send New Best besteval To Slaves
  cpuMPIComm.start() ;

  double data[2] ;

  data[0] = nbrbatch ;
  data[1] = customerholdingcost ;
  
  for (int rank=1;rank<nbrNodes;++rank)   
    MPI_Isend(&data, 2, MPI_DOUBLE, rank, BESTEVAL_FROM_MASTER_TAG, MPI_COMM_WORLD, &request_besteval[rank]);
  
  cpuMPIComm.stop() ;
}

#endif





