#include "Dwarf.Unmanaged.Hybrid.h"

//Init members and bring up the MPI runtime.
Solver :: Solver()
{
    //CSR matrix storage and result buffers start out null; they are
    //allocated elsewhere (outside this translation unit's view).
    val = 0;
    col_ind = 0;
    row_ptr = 0;
    vector = 0;
    result = 0;

    rowNum = 0;
    valNum = 0;

    //Fix: the previous code passed an uninitialized char** to MPI_Init,
    //which implementations may dereference. MPI-2 allows NULL for both
    //arguments when the real command line is not available.
    MPI_Init(NULL, NULL);                               // Init MPI

    MPI_Comm_rank(MPI_COMM_WORLD, &commRank);           // Retrieve rank of the process
    MPI_Comm_size(MPI_COMM_WORLD, &commSize);           // Retrieve count of processes

    isRootThread = commRank == NUMBER_ROOT_PROCESS;
}

//Shut down MPI and release the CSR arrays.
Solver :: ~Solver()
{
    MPI_Finalize();                                     //Dispose MPI

    //Fix: these members are arrays, so they must be released with
    //delete[] — scalar delete on a new[]-allocated pointer is undefined
    //behavior. NOTE(review): assumes they were allocated with new[];
    //confirm against the allocation site (not visible in this file).
    if (isRootThread) 
    {
        //result and row_ptr are only owned by the root process.
        delete[] result;
        delete[] row_ptr;
    }
    delete[] val;
    delete[] col_ind;
    delete[] vector;    
}

// MPI and openMP based method for sparse matrix-vector multiplication.
//
// Master/worker scheme: the root process hands out matrix rows (CSR
// format; row_ptr/col_ind values are 1-based, hence the "- 1" offsets)
// to the other ranks one at a time. Each worker computes the dot
// product of its row with the vector using an openMP reduction and
// sends the scalar back, tagged with the row index. A row number
// >= rowNum acts as the termination signal for a worker.
void Solver::solve() 
{    
    int rowNumSend = 0;     //Next row index to hand out (root) / current row (worker).
    int rowNumRecv = 0;     //Number of received row results.
    MPI_Status status;      //Status of receiving.
    
    int nonZeroNum;         //Non-zero count of the current row.
    double tempResult;      //Dot product of one row with the vector.
    
    if (isRootThread)
    {
        //Distribute one row to each worker process to start them off.
        //If there are more workers than rows, the surplus workers get
        //rowNumSend >= rowNum and terminate immediately.
        for (int i = 1; i < commSize; i ++) 
        {
            MPI_Send(&rowNumSend, 1, MPI_INT, i, i, MPI_COMM_WORLD);

            if (rowNumSend < rowNum) 
            {            
                nonZeroNum = row_ptr[rowNumSend + 1] - row_ptr[rowNumSend];
                MPI_Send(&nonZeroNum, 1, MPI_INT, i, i, MPI_COMM_WORLD);
                MPI_Send(&val[row_ptr[rowNumSend] - 1], nonZeroNum, MPI_DOUBLE, i, i, MPI_COMM_WORLD);
                MPI_Send(&col_ind[row_ptr[rowNumSend] - 1], nonZeroNum, MPI_INT, i, i, MPI_COMM_WORLD);

                rowNumSend ++;
            }
        }

        //Collect one result per row; after each, hand the freed worker
        //either the next row or the termination signal.
        while(rowNumRecv < rowNum) 
        {
            //Probe first: the sender's tag carries the row index, which
            //selects the slot in result[] before the actual receive.
            MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);

            //Fix: receive with status.MPI_TAG (not MPI_ANY_TAG) so the
            //receive matches exactly the message that was probed rather
            //than any pending message from the same source.
            MPI_Recv(
                &result[status.MPI_TAG], 
                1, 
                MPI_DOUBLE, status.MPI_SOURCE, status.MPI_TAG, MPI_COMM_WORLD, &status);
            rowNumRecv ++;

            //Send the next row number (or termination value) to the
            //released process.
            MPI_Send(&rowNumSend, 1, MPI_INT, status.MPI_SOURCE, status.MPI_SOURCE, MPI_COMM_WORLD);

            if (rowNumSend < rowNum) 
            {
                nonZeroNum = row_ptr[rowNumSend + 1] - row_ptr[rowNumSend];
                MPI_Send(&nonZeroNum, 1, MPI_INT, status.MPI_SOURCE, status.MPI_SOURCE, MPI_COMM_WORLD);
                MPI_Send(
                    &val[row_ptr[rowNumSend] - 1], 
                    nonZeroNum, 
                    MPI_DOUBLE, status.MPI_SOURCE, status.MPI_SOURCE, MPI_COMM_WORLD);
                MPI_Send(
                    &col_ind[row_ptr[rowNumSend] - 1], 
                    nonZeroNum, 
                    MPI_INT, status.MPI_SOURCE, status.MPI_SOURCE, MPI_COMM_WORLD);

                rowNumSend ++;
            }
        }        
    }
    else
    {   
        //Worker side. The former while(true) + break shape was
        //flattened into a pre-tested loop to avoid warning C4127.
        //Receive the first row number from the root.
        MPI_Recv(&rowNumSend, 1, MPI_INT, NUMBER_ROOT_PROCESS, MPI_ANY_TAG, MPI_COMM_WORLD, &status);

        //A row number >= rowNum is the termination signal.
        //NOTE(review): assumes rowNum holds the same value on worker
        //ranks as on the root — confirm it is set before solve() runs.
        while(rowNumSend < rowNum)
        {
            //Receive the row: non-zero count, values, column indices.
            MPI_Recv(&nonZeroNum, 1, MPI_INT, NUMBER_ROOT_PROCESS, MPI_ANY_TAG, MPI_COMM_WORLD, &status);

            MPI_Recv(val, nonZeroNum, MPI_DOUBLE, NUMBER_ROOT_PROCESS, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
            MPI_Recv(col_ind, nonZeroNum, MPI_INT, NUMBER_ROOT_PROCESS, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
            
            tempResult = 0;
            #pragma omp parallel for reduction(+ : tempResult)              // openMP directive
            for (int i = 0; i < nonZeroNum; i ++ )                          //Multiply row on vector (col_ind is 1-based).
            {            
                tempResult += vector[col_ind[i] - 1] * val[i];
            }

            //Return the scalar result to the root, tagged with the row
            //index so the root can place it into result[].
            MPI_Send(&tempResult, 1, MPI_DOUBLE, NUMBER_ROOT_PROCESS, rowNumSend, MPI_COMM_WORLD); 

            //Receive the next row number (or the termination signal).
            MPI_Recv(&rowNumSend, 1, MPI_INT, NUMBER_ROOT_PROCESS, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
        }
    }
}