#include "Dwarf.Unmanaged.Mpi.h"

using namespace std;

// The settings for this dwarf.
static Settings* settings;

// Get settings from the command line.
static int getSettings(int argc, char** argv, Solver *solver) 
{
    int error = 0;
    if (solver->isRootThread) 
    {
        settings = new Settings();
        if (settings->init(argc,argv,solver)) 
        {            
            delete settings;
            error = 1;
        }
    }
    //Distribute error if that occurred while parsing.
    MPI_Bcast(&error, 1, MPI_INT, NUMBER_ROOT_PROCESS, MPI_COMM_WORLD);
    if (error == 1 ) return -1;

    int n;
    int m;
    int t;
    if(solver->isRootThread)
    {
        n = solver->hmm->getNumberOfStates();
        m = solver->hmm->getNumberOfObservation();
        t = solver->vit->getLengthOfObservationSequence();
    }
    //Distribute the number of states.
    MPI_Bcast(&n, 1, MPI_INT, NUMBER_ROOT_PROCESS, MPI_COMM_WORLD);
    //Distribute the number of observation.
    MPI_Bcast(&m, 1, MPI_INT, NUMBER_ROOT_PROCESS, MPI_COMM_WORLD);
    //Distribute the length of observation sequence.
    MPI_Bcast(&t, 1, MPI_INT, NUMBER_ROOT_PROCESS, MPI_COMM_WORLD);
    //Initializing of slave instances
    if(!solver->isRootThread)
    {
        solver->init(n,m,t);
    }

    //Distribute  the A and B matrices to all instances
    int lengthOfStates = solver->hmm->getNumberOfStates();
    for( int i = 0 ; i < lengthOfStates; i++ )
    {
        MPI_Bcast(solver->hmm->stateTransitionMatrix[i], lengthOfStates, MPI_DOUBLE, NUMBER_ROOT_PROCESS, MPI_COMM_WORLD);
        MPI_Bcast(solver->hmm->observationProbabilityMatrix[i], solver->hmm->getNumberOfObservation(), MPI_DOUBLE, NUMBER_ROOT_PROCESS, MPI_COMM_WORLD);
    }

    //Distribute  O vector to all instances.
    MPI_Bcast(solver->vit->observationSequence, solver->vit->getLengthOfObservationSequence(), MPI_INT, NUMBER_ROOT_PROCESS, MPI_COMM_WORLD);

    /*Set up of balance*/
    if(solver->commRank  == (solver->commSize - 1) )
    {
        solver->lengthForProc = solver->hmm->getNumberOfStates() - (solver->hmm->getNumberOfStates() / solver->commSize) * solver->commRank;  
        solver->startIndx = solver->hmm->getNumberOfStates() - solver->lengthForProc;
        solver->endIndx = solver->hmm->getNumberOfStates();
    } 
    else
    {
        solver->lengthForProc = solver->hmm->getNumberOfStates() / solver->commSize;
        solver->startIndx = solver->lengthForProc * solver->commRank;
        solver->endIndx = solver->lengthForProc * (solver->commRank + 1);
    }
    int shift = solver->startIndx;
    //Distribute  of shifts to all processors.
    MPI_Gather(&shift, 1, MPI_INT, solver->arrayOfShifts, 1, MPI_INT, NUMBER_ROOT_PROCESS, MPI_COMM_WORLD);
    MPI_Bcast(solver->arrayOfShifts, solver->commSize, MPI_INT, NUMBER_ROOT_PROCESS , MPI_COMM_WORLD);
    //Distribute of work's length to all processors.
    MPI_Gather(&solver->lengthForProc, 1, MPI_INT, solver->arrayOfProccLengths, 1, MPI_INT, NUMBER_ROOT_PROCESS, MPI_COMM_WORLD);
    MPI_Bcast(solver->arrayOfProccLengths, solver->commSize, MPI_INT, NUMBER_ROOT_PROCESS , MPI_COMM_WORLD);

    /*End of setup*/
    return 0;
}

// Program entry point: initialize MPI, distribute the problem, solve it,
// report timing/results from the root rank, and shut MPI down.
// NOTE: the C++ standard requires main to return int ('void main' is
// ill-formed), hence the signature fix.
int main(int argc, char** argv)
{   
    // initialize MPI
    MPI_Init(&argc, &argv); 

    Solver* solver = new Solver();
    if (getSettings(argc, argv, solver)) 
    {
        // The parse error was broadcast inside getSettings, so EVERY rank
        // takes this branch together; MPI_Finalize is therefore safe here
        // and must be called before exiting (the original exit(-1) leaked
        // the MPI runtime).
        delete solver;
        MPI_Finalize();
        return -1;
    }


    // Ensure all ranks are set up before the timer starts.
    MPI_Barrier(MPI_COMM_WORLD);

    if (solver->isRootThread) 
    {
        settings->start();              // Start new time count.
    }

    solver->solve();                    // Solve the current problem.

    // Wait for every rank so the root measures the full parallel runtime.
    MPI_Barrier(MPI_COMM_WORLD);

    if (solver->isRootThread) 
    {
        settings->finish(solver);       // Stop the time count and write results. 
        delete settings;
    } 
    MPI_Finalize();
    delete solver;
    return 0;
}
