#include <math.h>
#include <stdlib.h>
#include <unistd.h>

#include <iostream>
#include <vector>

#include <mpi.h>

#include "gsolverBBPv3NoWaitSlave.h"

/**
 * A BBP solver : starts with jobs with last due dates
 *
 * In this version 3 of the BBP, we start with the jobs reordered by increasing due date. But
 * unlike the brute-force exploration of version 2, this time we do not consider arbitrary batch
 * numbers. For each job, we choose to add it either to an already-existing batch whose residual
 * capacity is large enough to hold the new job, or to a new batch.
 *
 * The new batch is scheduled before the batches already created.
 *
 * WARNING : For this solution to be optimal, given that we start
 * from a problem in which jobs are sorted by increasing due date, it is also necessary that the
 * storage cost of all jobs be identical. Otherwise, the solution built this way is no longer
 * guaranteed to be optimal.
 */
GSolverBBPv3NoWaitSlave::GSolverBBPv3NoWaitSlave(GSupplyLinkProblem& pb, int node_, int p_):GSolverBBPv3NoWait(pb,node_,p_){
}





/**
 * Slave-side initialization.
 *
 * Runs the base-class initialization, then computes the bounds on the number
 * of batches and resets the current solution to an empty batching.
 * (Removed an unused local `int eval = -1;` and hoisted the repeated
 * transporter-capacity lookup out of the computation.)
 */
void GSolverBBPv3NoWaitSlave::solveInit() {
  GSolverBBPv3NoWait::solveInit() ;

  // Lower bound on the number of batches: total job volume divided by the
  // transporter capacity, rounded up to the next integer.
  double totalVolume = 0 ;
  for (int i=0;i<this->problem.getNP();++i)
    totalVolume+=this->problem.getJobSize(i) ;
  const double capacity = this->problem.getTransporter(0).getCapacity() ;
  minBatch = int(totalVolume/capacity) ;
  if (minBatch*capacity<totalVolume) minBatch++ ;

  // Upper bound: one batch per job.
  maxBatch = this->problem.getNP() ;

  // Start from an empty batching.
  this->currentSolutionPermuted.setNbrBatch(0) ;

}




/**
 * Non-blocking check for a DIE message from the master.
 *
 * @return non-zero when the pending DIE receive has completed, 0 otherwise.
 */
int GSolverBBPv3NoWaitSlave::testForDieMsgFromMaster() {
  int msgArrived = 0 ;

  cpuMPIComm.start() ;
  // Poll the pending non-blocking receive posted on DIE_TAG.
  MPI_Test(&request_diefrommaster, &msgArrived, &status);
  cpuMPIComm.stop() ;

  return msgArrived ;
}

/**
 * Drains every pending "best evaluation" message broadcast by the master and
 * keeps the best one in bestSolutionPermuted.
 *
 * An incoming evaluation is better when it uses fewer batches, or the same
 * number of batches with a lower customer holding cost (lexicographic order).
 * After each consumed message the non-blocking receive is re-armed so the
 * next broadcast can be picked up.
 */
void GSolverBBPv3NoWaitSlave::testForBestEvalMsgFromMaster() {
  int msgArrived ;

  cpuMPIComm.start() ;

  // Consume every best-eval message already delivered by the master.
  MPI_Test(&request_bestevalfrommaster, &msgArrived, &status);
  while (msgArrived) {
    const int rcvNbrBatch = (int)bestEvalMPI[0] ;
    const double rcvCustomerCost = bestEvalMPI[1] ;

    // Lexicographic comparison: fewer batches wins, ties are broken on the
    // customer holding cost.
    const bool improves =
      (rcvNbrBatch < bestSolutionPermuted.getNbrBatch())
      || (rcvNbrBatch == bestSolutionPermuted.getNbrBatch()
          && rcvCustomerCost < bestSolutionPermuted.getEvaluationCustomer()) ;

    if (improves) {
      bestSolutionPermuted.setNbrBatch(rcvNbrBatch) ;
      bestSolutionPermuted.setEvaluationCustomer(rcvCustomerCost) ;
#ifdef GMPIDEBUG
      cout << "Slave node" << node << " received best eval from Master: nbrBatch=" << rcvNbrBatch << " - customerHoldingCost=" << rcvCustomerCost << endl ;
#endif
    }

    // Re-arm the non-blocking receive and see whether another broadcast is
    // already queued.
    MPI_Irecv(&bestEvalMPI, 2, MPI_DOUBLE, 0, BESTEVAL_FROM_MASTER_TAG, MPI_COMM_WORLD, &request_bestevalfrommaster);
    MPI_Test(&request_bestevalfrommaster, &msgArrived, &status);
  }

  cpuMPIComm.stop() ;

}


/**
 *
 */
int GSolverBBPv3NoWaitSlave::testForWorkMsgFromMaster(int* buffer, searchnode& treenode) {
  int flag=0 ;

  cpuMPIComm.start() ;

  // Check for a work to do from master
  int errcode = MPI_Test(&request_workfrommaster, &flag, &status);
  if (errcode==MPI_ERR_REQUEST) {
    cout << "GSolverBBPv3NoWaitSlave::testForBestEvalMsgFromMaster() ERROR MPI_ERR_REQUEST" << endl << flush ;
    cout << "Invalid MPI_Request. Either null or, in the case of a MPI_Start or MPI_Startall, not a persistent request. " << endl << flush ;
  }
  if (flag) {
    treenode.sol.setNbrBatch(buffer[0]) ;
    treenode.nbrbatch = buffer[0] ;
    treenode.job = buffer[1] ;

    // DOUBLE ARRAY : tabBatchRemainingCapacity + bestEvaluation
    double ibuffer2[treenode.nbrbatch+2] ;

    MPI_Recv(ibuffer2, treenode.nbrbatch+1, MPI_DOUBLE, 0, WORK2_TAG, MPI_COMM_WORLD, &status);

    treenode.tabBatchRemainingCapacity.clear() ;
    for (int i=0;i<treenode.nbrbatch;++i)
      treenode.tabBatchRemainingCapacity.push_back(ibuffer2[i]) ;
  
    bestSolutionPermuted.setNbrBatch((int)ibuffer2[treenode.nbrbatch]) ;
    bestSolutionPermuted.setEvaluationCustomer(ibuffer2[treenode.nbrbatch+1]) ;

    // INT ARRAY : indice of batch for each job
    int size3 = problem.getNP()-treenode.job ;
    int ibuffer3[size3] ;

    MPI_Recv(ibuffer3, size3, MPI_INT, 0, WORK3_TAG, MPI_COMM_WORLD, &status);
    for (int i=treenode.job+1;i<problem.getNP();++i) {
      treenode.sol.setBatchIndice(i, ibuffer3[i-treenode.job-1]) ;
      treenode.sol.setDeliveryCompletionTime(i, ibuffer3[i-treenode.job-1]) ;
    }

    // on se remet en attente 
    MPI_Irecv(buffer, 2, MPI_INT, 0, WORK1_TAG, MPI_COMM_WORLD, &request_workfrommaster);
  }

  cpuMPIComm.stop() ;

  return flag ;
}



/**
 * SLAVE main loop.
 *
 * Posts the three non-blocking receives for master messages (best evaluation,
 * work unit, DIE), then loops until the DIE message arrives: propagate any new
 * best evaluation, run a depth-first search on each received work unit, and
 * notify the master (ENDWORK_TAG) when the work unit is exhausted.
 * (Removed an unused local `double result;`; logic otherwise unchanged.)
 */
void GSolverBBPv3NoWaitSlave::solveMain() {
  searchnode treenode ;

  // Mean processing time, used to dynamically adjust the frequency of MPI comm tests
  double totTime=0.0 ;
  int nbrTime = 0 ;

  cpuTotal.start() ;

  cpuMPIComm.start() ;

  // Post the non-blocking receive for best-eval broadcasts from the master
  MPI_Irecv(&bestEvalMPI, 2, MPI_DOUBLE, 0, BESTEVAL_FROM_MASTER_TAG, MPI_COMM_WORLD, &request_bestevalfrommaster);

  /* Receive a work-unit header (WORK1) from the master */
  MPI_Irecv(buffer, 2, MPI_INT, 0, WORK1_TAG, MPI_COMM_WORLD, &request_workfrommaster);

  /* Receive DIE from the master */
  MPI_Irecv(0, 0, MPI_INT, 0, DIE_TAG, MPI_COMM_WORLD, &request_diefrommaster);

  cpuMPIComm.stop() ;

  while (!testForDieMsgFromMaster()) {
    // Wait for 10 millisec for a searchtree root to explore
    //usleep(10000) ;

    // Check for a new besteval msg from master
    testForBestEvalMsgFromMaster() ;

    // Check for a new work msg from master
    if (testForWorkMsgFromMaster(buffer, treenode)) {

       /* Do the work: depth-first search from the received tree node */
      cpuComputing.start() ;
      alarmMPITestSlave.rearm() ;
      depthFirstSearch(treenode) ;
      cpuComputing.stop() ;      

#ifdef GADJUST_PERIOD_MPI_TEST
      // Adapt the MPI-test alarm period to the mean work-unit duration so that
      // communication polling stays a small fraction of computing time.
      double timesec = cpuComputing.getRunningTime() ;
      totTime += timesec ;
      nbrTime++ ;
      double meantime = totTime/nbrTime ;
      if (nbrTime>=MIN_NBR_STAT && meantime>RATIO_MEANTIME_PERIOD_MPI_TEST*alarmMPITestSlave.getAlarmDuration()) {
	totTime = 0.0 ; nbrTime = 0 ;
	double newduration = meantime/RATIO_MEANTIME_PERIOD_MPI_TEST ;
	if (newduration<1.0) newduration = 0.0 ;
	if (newduration>MAX_PERIOD_MPI_TEST) newduration = MAX_PERIOD_MPI_TEST ;
	alarmMPITestSlave.setAlarmDuration(newduration) ;
      }
#endif

       // Alert master that work is finished
#ifdef GMPIDEBUG
       cout << "Slave node "<<node<<" sending a ENDWORK msg to master" << endl ;
#endif
       cpuMPIComm.start() ;
       MPI_Send(0,0,MPI_INT,0,ENDWORK_TAG,MPI_COMM_WORLD) ;
       cpuMPIComm.stop() ;
    }

  }

  cpuTotal.stop() ;
}


#ifdef GMPI
/**
 * Periodic MPI polling hook: when the test alarm fires (and re-arms itself),
 * check whether the master has broadcast a better evaluation.
 *
 * NOTE(review): this polls alarmMPITestMaster while solveMain() re-arms
 * alarmMPITestSlave — confirm which alarm is intended on the slave side.
 */
void GSolverBBPv3NoWaitSlave::testForMPIComm() {
  // Skip entirely when the alarm period has not elapsed yet.
  if (!alarmMPITestMaster.testAndRearmAlarm())
    return ;

  // Check for a new besteval broadcast from the master.
  testForBestEvalMsgFromMaster() ;
}


/**
 * Asynchronously sends the slave's new best evaluation to the master.
 *
 * @param nbrbatch            number of batches of the new best solution
 * @param customerholdingcost customer holding cost of the new best solution
 */
void GSolverBBPv3NoWaitSlave::sendMPICommForBestSolution(double nbrbatch, double customerholdingcost) {
  cpuMPIComm.start() ;

  // FIX: the buffer handed to MPI_Isend must remain valid until the send
  // completes. The previous revision used a stack-local array that went out
  // of scope as soon as this function returned — undefined behavior for a
  // non-blocking send. A static buffer outlives the call.
  // NOTE(review): request_bestevaltomaster is overwritten here without
  // completing the previous send; if two calls can overlap before the first
  // Isend completes, the previous request should be MPI_Wait-ed first.
  static double data[2] ;

  data[0] = nbrbatch ;
  data[1] = customerholdingcost ;

  MPI_Isend(data, 2, MPI_DOUBLE, 0, BESTEVAL_FROM_SLAVE_TAG, MPI_COMM_WORLD, &request_bestevaltomaster);
  cpuMPIComm.stop() ;
}

#endif

