
#include <math.h>
#include <stdlib.h>
#include <unistd.h>

#include <iostream>
#include <vector>

#ifdef GMPI
#include <mpi.h>
#endif

#include "gsolverBBPv3NoWaitMaster.h"

/**
 * A BBP solver: starts with the jobs holding the latest due dates.
 *
 * In this version 3 of the BBP, we start with the jobs reordered by
 * increasing due date. But unlike the brute-force exploration of version 2,
 * this time we do not consider arbitrary batch numbers. For each job, we
 * choose to add it either to an already existing batch whose residual
 * capacity is large enough to hold the new job, or to a new batch.
 *
 * The new batch is scheduled before the other batches already created.
 *
 * WARNING: for this construction to be optimal — given that we start from a
 * problem whose jobs are sorted by increasing due date — the storage cost of
 * all jobs must also be identical. Otherwise the solution built this way is
 * no longer guaranteed to be optimal.
 */
GSolverBBPv3NoWaitMaster::GSolverBBPv3NoWaitMaster(GSupplyLinkProblem& pb, int node_, int p_):GSolverBBPv3NoWait(pb,node_,p_) {
}



/**
 * Master-side initialization: builds an initial incumbent with FFD,
 * broadcasts its evaluation to the slaves, and derives the batch-count
 * bounds [minBatch, maxBatch] for the search.
 */
void GSolverBBPv3NoWaitMaster::solveInit() {
  GSolverBBPv3NoWait::solveInit() ;

  initBestSolutionWithFFD() ;

  // Broadcast the initial best evaluation to every slave node.
  double bestEval = bestSolutionDueDateOrdered.getEvaluation() ;
#ifdef GMPI
  for (int slave=1;slave<nbrNodes;++slave)
    MPI_Send(&bestEval, 1, MPI_DOUBLE, slave, BESTEVAL_FROM_MASTER_TAG, MPI_COMM_WORLD);
#endif

  // Lower bound on the number of batches: total job volume divided by the
  // transporter capacity, rounded up.
  const int np = this->problem.getNP() ;
  double totalVolume = 0 ;
  for (int job=0;job<np;++job)
    totalVolume += this->problem.getJobSize(job) ;

  const double capacity = this->problem.getTransporter(0).getCapacity() ;
  minBatch = int(totalVolume/capacity) ;
  if (minBatch*capacity < totalVolume)
    minBatch++ ;           // round up when the division is not exact
  maxBatch = np ;          // worst case: one batch per job

  this->currentSolutionDueDateOrdered.setNbrBatch(0) ;
}


#ifdef GMPI
/**
 * Drains all pending BESTEVAL_FROM_SLAVE_TAG messages.
 *
 * For each received evaluation that improves on the current best, the master
 * records it and forwards the new bound to every slave; the non-blocking
 * receive is then re-posted and the loop continues until no message is
 * pending. MPI communication time is accumulated in cpuMPIComm.
 */
void GSolverBBPv3NoWaitMaster::testForBestEvalMsgFromSlave() {
  int flag ;

  cpuMPIComm.start() ;

  // Check for a new besteval
  MPI_Test(&request_bestevalfromslave, &flag, &status);
  while (flag) {
#ifdef GDEBUG
      cout << "Master receiving BestEval Msg : " << bestEvalMPI << endl ;
#endif

    if (bestEvalMPI<bestSolutionDueDateOrdered.getEvaluation()) {
      // NOTE(review): assigning through getEvaluation() assumes it returns a
      // non-const reference; a setter call would be clearer — confirm the API.
      bestSolutionDueDateOrdered.getEvaluation() = bestEvalMPI ;
      cout << "Master updating bestEvalMPI=" << bestEvalMPI << " !!!!!!!!!!!!!!!!!!!!!!!!" << endl ;

      // send New Best besteval To Slaves
      // NOTE(review): these MPI_Isend calls read bestEvalMPI while the
      // MPI_Irecv below is re-posted on the same buffer, and the
      // request_besteval[] requests are not visibly completed anywhere in
      // this file — verify the buffer cannot be overwritten before the
      // non-blocking sends finish.
      for (int rank=1;rank<nbrNodes;++rank) 
        MPI_Isend(&bestEvalMPI, 1, MPI_DOUBLE, rank, BESTEVAL_FROM_MASTER_TAG, MPI_COMM_WORLD, &request_besteval[rank]);

    }

    // Re-arm the non-blocking receive and check for another pending message.
    MPI_Irecv(&bestEvalMPI, 1, MPI_DOUBLE, MPI_ANY_SOURCE, BESTEVAL_FROM_SLAVE_TAG, MPI_COMM_WORLD, &request_bestevalfromslave);
    MPI_Test(&request_bestevalfromslave, &flag, &status);
  }

  cpuMPIComm.stop() ;
}
#endif


#ifdef GMPI
/**
 * Drains all pending ENDWORK_TAG messages from the slaves.
 *
 * Each message marks its sender as idle (slaveIsWaiting) and bumps
 * nbrWaitingSlaves; the non-blocking receive is re-posted after every
 * message so further notifications can arrive.
 *
 * @return non-zero when every slave is idle (all distributed work is done).
 */
int GSolverBBPv3NoWaitMaster::testForEndWorkMsgFromSlave() {
  int flag ;

  cpuMPIComm.start() ;

  // Check for a pending ENDWORK notification from a slave.
  MPI_Test(&request_endworkfromslave, &flag, &status);

  while (flag) {
    int slaverank = status.MPI_SOURCE ;
    slaveIsWaiting[slaverank] = 1 ;
    nbrWaitingSlaves++ ;

#ifdef GDEBUG
    cout << "Master receives a ENDWORK msg from slave node" << slaverank << " ; nbrWaitingSlaves=" << nbrWaitingSlaves << endl ;
#endif

    assert (nbrWaitingSlaves<=maxWaitingSlaves) ;
    
    // Re-arm the receive and look for another pending notification.
    MPI_Irecv(0, 0, MPI_INT, MPI_ANY_SOURCE, ENDWORK_TAG, MPI_COMM_WORLD, &request_endworkfromslave);
    MPI_Test(&request_endworkfromslave, &flag, &status);
  }

  cpuMPIComm.stop() ;

  return (nbrWaitingSlaves==maxWaitingSlaves) ;
}
#endif



/**
 * Main solve loop of the master node.
 *
 * Runs the depth-first search locally (handing subtrees to idle slaves via
 * dispatchWork()); in MPI builds it then keeps relaying best-evaluation
 * updates between slaves until every slave has reported ENDWORK, and
 * finally tells all slaves to terminate with DIE_TAG.
 */
void GSolverBBPv3NoWaitMaster::solveMain() {

  cpuTotal.start() ;

#ifdef GMPI
  // Initially every slave is idle: work is handed out by dispatchWork()
  // while the master runs its own depth-first search.
  maxWaitingSlaves = nbrNodes - 1 ;
  nbrWaitingSlaves = maxWaitingSlaves ;
  for (int s=1;s<nbrNodes;++s) slaveIsWaiting[s] = 1 ;
#endif

  // Root of the search tree: no batch created yet, start with the last job
  // (jobs are explored from the highest index down).
  searchnode searchtreeroot ;
  searchtreeroot.nbrbatch = 0 ;
  searchtreeroot.job = this->problem.getNP()-1 ;
  searchtreeroot.sol = this->currentSolutionDueDateOrdered ;

#ifdef GMPI
  // Post the non-blocking receives serviced by testForBestEvalMsgFromSlave()
  // and testForEndWorkMsgFromSlave().
  MPI_Irecv(&bestEvalMPI, 1, MPI_DOUBLE, MPI_ANY_SOURCE, BESTEVAL_FROM_SLAVE_TAG, MPI_COMM_WORLD, &request_bestevalfromslave);

  /* Receive ENDWORK from slave */
  MPI_Irecv(0, 0, MPI_INT, MPI_ANY_SOURCE, ENDWORK_TAG, MPI_COMM_WORLD, &request_endworkfromslave);
#endif

  // Single process and multi process
  cpuComputing.start() ;
  alarmMPITestMaster.rearm() ;
  depthFirstSearch(searchtreeroot) ;
  cpuComputing.stop() ;
  cout << "Master node"<<node<<" stopping computation at " << cpuComputing.getStopTimeMs() << "s"  <<endl ;


#ifdef GMPI
  // Keep relaying best-eval updates until every slave has sent ENDWORK.
  bool workIsFinished = false ;
  while (! workIsFinished) {
    testForBestEvalMsgFromSlave() ;
    workIsFinished = testForEndWorkMsgFromSlave() ;
  }

  // Tell all the slaves to exit by sending an empty message with the DIE_TAG.
  cpuMPIComm.start() ;
  for (int rank=1;rank<nbrNodes;++rank) {
    MPI_Send(0, 0, MPI_INT, rank, DIE_TAG, MPI_COMM_WORLD);
  }
  cpuMPIComm.stop() ;
#endif

  cpuTotal.stop() ;

}

#ifdef GMPI
/**
 * Tries to hand the subtree rooted at treenodechild to an idle slave.
 *
 * Dispatching only happens when the node's job index lies inside the
 * [minJobForDispatching, maxJobForDispatching] window and at least one
 * slave is idle. The work unit is sent in three messages:
 *   WORK1_TAG : 2 ints  — number of batches, job to place
 *   WORK2_TAG : doubles — remaining capacity per batch + current best eval
 *   WORK3_TAG : ints    — batch index of each already-placed job
 *
 * @return WORK_DISPATCHED if an idle slave was found and the work was sent,
 *         WORK_NOT_DISPATCHED otherwise.
 */
int GSolverBBPv3NoWaitMaster::dispatchWork(searchnode& treenodechild) {
  int returnvalue = WORK_NOT_DISPATCHED ;
  if (treenodechild.job>=minJobForDispatching && treenodechild.job<=maxJobForDispatching && nbrWaitingSlaves>0) {
    cpuMPIComm.start() ;

    // Find the first idle slave (if any).
    int rank = 1 ;
    while (rank<nbrNodes && !slaveIsWaiting[rank])
      rank++ ;

    if (rank<nbrNodes) {
#ifdef GDEBUG
      cout << "Master dispatching new work to slave " << rank << endl ;
#endif

      nbrWaitingSlaves-- ;
      slaveIsWaiting[rank] = 0 ;

      // Message 1 — two ints: number of batches and the job to place.
      buffer[0] = treenodechild.nbrbatch ;
      buffer[1] = treenodechild.job ;
      MPI_Send(buffer,2,MPI_INT,rank,WORK1_TAG,MPI_COMM_WORLD) ;

      // Message 2 — remaining capacity of each batch, then the best
      // evaluation. std::vector replaces the original variable-length
      // array, which is a non-standard GCC extension in C++.
      std::vector<double> capBuffer(treenodechild.nbrbatch+1) ;
      for (int i=0;i<treenodechild.nbrbatch;++i)
        capBuffer[i] = treenodechild.tabBatchRemainingCapacity[i] ;
      capBuffer[treenodechild.nbrbatch] = bestSolutionDueDateOrdered.getEvaluation() ;
      MPI_Send(capBuffer.data(),treenodechild.nbrbatch+1,MPI_DOUBLE,rank,WORK2_TAG,MPI_COMM_WORLD) ;

      // Message 3 — batch index of each already-placed job (job+1 .. NP-1).
      // NOTE(review): size3 is NP-job but only NP-job-1 entries are filled;
      // the original sent one uninitialized trailing int. The vector
      // zero-initializes that element so the message content is now
      // deterministic, while the message length the slaves expect is
      // preserved — confirm against the slave-side receive.
      int size3 = problem.getNP()-treenodechild.job ;
      std::vector<int> batchBuffer(size3) ;
      for (int i=treenodechild.job+1;i<problem.getNP();++i)
        batchBuffer[i-treenodechild.job-1] = (treenodechild.sol).getBatchIndice(i) ;
      MPI_Send(batchBuffer.data(),size3,MPI_INT,rank,WORK3_TAG,MPI_COMM_WORLD) ;

      returnvalue = WORK_DISPATCHED ;
    }

    cpuMPIComm.stop() ;
  }

  return returnvalue ;
}



/**
 * Periodic poll for slave messages, throttled by alarmMPITestMaster.
 */
void GSolverBBPv3NoWaitMaster::testForMPIComm() {
  // Only poll when the alarm has fired; testAndRearmAlarm() re-arms it.
  if (!alarmMPITestMaster.testAndRearmAlarm())
    return ;

  // Check for a new besteval, then for ENDWORK notifications.
  testForBestEvalMsgFromSlave() ;
  testForEndWorkMsgFromSlave() ;
}


/**
 * 
 */
void GSolverBBPv3NoWaitMaster::sendMPICommForBestSolution(double eval) {
  // send New Best besteval To Slaves
  cpuMPIComm.start() ;
  
  for (int rank=1;rank<nbrNodes;++rank)   
    MPI_Isend(&eval, 1, MPI_DOUBLE, rank, BESTEVAL_FROM_MASTER_TAG, MPI_COMM_WORLD, &request_besteval[rank]);
  
  cpuMPIComm.stop() ;
}

#endif





