#include "mpi.h"
#include "LAPackHeader.h"
#include "My_Math.h"

#include <fstream.h>


// MPI message tags used by the master/slave DGTSV protocol
#define GTAG 99        // general data traffic (matrix size, diagonals, b vector)
#define Res_TAG 66     // result traffic (success flag, solution vector)
#define OpCode_TAG 55  // control traffic (1 = start a task, 0 = shut down)
#define MASTER 0       // rank of the master process


// Set once by CMPI_Initialise(); read by every rank afterwards.
int commSize;             // total number of MPI processes
int commRank;             // this process's rank (0 = master)
char processorName[64];   // host name as reported by MPI_Get_processor_name


// Initialise the MPI runtime and populate the global rank/size/host-name
// variables that the rest of the program reads.
void CMPI_Initialise(int argc, char * argv[])
{
	int nameLength = 0; // length reported by MPI_Get_processor_name (unused afterwards)

	MPI_Init(&argc, &argv);
	MPI_Comm_size(MPI_COMM_WORLD, &commSize);
	MPI_Comm_rank(MPI_COMM_WORLD, &commRank);
	MPI_Get_processor_name(processorName, &nameLength);
	/*cout << "\n MPI Initialised:" 
		 << "\n Machine Name: " << processorName
		 << "\n Machine Rank: " << commRank
		 << "\n Comm Size   : " << commSize 
		 << "\n ================================" << flush;*/
}
/******************************************************************************/
								//Destructor
// Shut down the MPI runtime. Must be called exactly once per process,
// after all MPI communication has completed.
void CMPI_Finalise()
{
	MPI_Finalize();
}
/******************************************************************************/
// 0 --> calculation failed
// 1 --> calculation succeeded
// Master side of the distributed DGTSV solve: ships the tridiagonal system
// (A x = b) to a slave rank, then collects the solution back into b.
//
// Wire protocol (must stay in lock-step with calculateDGTSV_Slave):
//   send: opCode(1) -> matrixSize -> main diag -> lower diag -> upper diag -> b
//   recv: success flag -> (on success) solution vector into b
//
// Parameters:
//   triD     - full tridiagonal matrix; its diagonals are extracted here
//   b        - right-hand-side vector; overwritten with the solution on success
//   taskname - task label (currently unused here)
//   slaveID  - rank of the slave to use (default 1)
//   size     - nominal problem size (currently unused; size is taken from triD)
// Returns 1 on success, 0 if diagonal extraction or the remote solve failed.
int calculateDGTSV_Master(CMy_Matrix *triD, CMy_Matrix *b, char *taskname, int slaveID=1, int size=1)
{		
	int funcRes = 0;

	// vectors holding main, lower, and upper diagonal and the b vector Ax = b
	// (allocated by getDiagonals; this function owns and deletes them)
	CMy_Matrix *md=NULL, *ld=NULL, *ud=NULL; 

	MPI_Status status;
	int matrixSize, calculationsSuccess, opCode;
	
	if( triD->getDiagonals(md, ld, ud) == 1 )
	{		
		// Sending an operational Code: 0 to stop  and 1 to start
		// wake up slave 1
		//cout << "\n Sending Wake up Message to Slave Number:  "<< slaveID << flush;
		opCode = 1;
		MPI_Send(&opCode, 1, MPI_INT, slaveID, OpCode_TAG, MPI_COMM_WORLD);
						// Start Sending the values	
			// 1. send Vector Matrix size		
		matrixSize = triD->getRowCount();
		MPI_Send(&matrixSize, 1, MPI_INT, slaveID , GTAG, MPI_COMM_WORLD);
	
			// 2. send the main diagonal (length matrixSize)
		MPI_Send(md->mat[0], matrixSize, MPI_DOUBLE, slaveID , GTAG, MPI_COMM_WORLD);
	
			// 3. send the lower diagonal (length matrixSize-1)
		MPI_Send(ld->mat[0], matrixSize-1, MPI_DOUBLE, slaveID , GTAG, MPI_COMM_WORLD);
	
			// 4. send the upper diagonal (length matrixSize-1)
		MPI_Send(ud->mat[0], matrixSize-1, MPI_DOUBLE, slaveID , GTAG, MPI_COMM_WORLD);
	
			// 5. send the b vector (length matrixSize)
		MPI_Send(b->mat[0], matrixSize, MPI_DOUBLE, slaveID , GTAG, MPI_COMM_WORLD);			
	
		
					// Receiving the result
			// 1. receive the calculation result flag
		MPI_Recv(&calculationsSuccess, 1, MPI_INT, slaveID, Res_TAG, MPI_COMM_WORLD, &status);
	
		if(calculationsSuccess)
		{
			// 2. receive the result vector (solution overwrites b in place)
			MPI_Recv(b->mat[0], matrixSize, MPI_DOUBLE, slaveID , Res_TAG, MPI_COMM_WORLD, &status);
			funcRes = 1;
		}
		else  // DGTSV function returned an error
			funcRes = 0;
	}		
	//else could not get the 3 diagonals --> funcRes = 0;

	// delete the three internally created vectors (created by calling the getDiagonal function)
	delete md;
	delete ud;
	delete ld;


	return funcRes;
}
/******************************************************************************/
// Slave side of the distributed DGTSV solve. Loops on control opcodes from
// the master: opCode 1 means "receive one system, solve it, send the result
// back"; opCode 0 means shut down and return.
//
// The receive/send sequence must stay in lock-step with calculateDGTSV_Master:
//   recv: matrixSize -> main diag -> lower diag -> upper diag -> b
//   send: success flag -> (on success) solution vector
void calculateDGTSV_Slave()
{	
	// vectors holding main, lower, and upper diagonal and the b vector Ax = b
	CMy_Matrix *md=NULL, *ld=NULL, *ud=NULL, *b=NULL; 

	MPI_Status status;
	int matrixSize, calculationsSuccess, opCode;

	matrixSize=0;
						// Receiving an operational Code: 0 to stop  and 1 to start
	MPI_Recv(&opCode, 1, MPI_INT, MASTER, OpCode_TAG, MPI_COMM_WORLD, &status);	
	
	
	while(opCode != 0)
	{
		//cout << "\n " << processorName << " received a Wake up Message "<< flush;

		// start receiving the values
			// 1. receving the Matrix size
		MPI_Recv(&matrixSize, 1, MPI_INT, MASTER, GTAG, MPI_COMM_WORLD, &status);

		
			// 2. receive the main diagonal (length matrixSize)
		md = new CMy_Matrix(1, matrixSize, "SLAVE MD");
		MPI_Recv(md->mat[0], matrixSize, MPI_DOUBLE, MASTER, GTAG, MPI_COMM_WORLD, &status);		
			// 3. receive the lower diagonal (length matrixSize-1)
		ld = new CMy_Matrix(1, matrixSize-1, "SLAVE LD");
		MPI_Recv(ld->mat[0], matrixSize-1, MPI_DOUBLE, MASTER, GTAG, MPI_COMM_WORLD, &status);
			// 4. receive the upper diagonal (length matrixSize-1)
			// (label fixed: was mistakenly "SLAVE LD", a copy-paste error)
		ud = new CMy_Matrix(1, matrixSize-1, "SLAVE UD");
		MPI_Recv(ud->mat[0], matrixSize-1, MPI_DOUBLE, MASTER, GTAG, MPI_COMM_WORLD, &status);
			// 5. receive the b vector (length matrixSize)
		b = new CMy_Matrix(1, matrixSize, "SLAVE B Vector");
		MPI_Recv(b->mat[0], matrixSize, MPI_DOUBLE, MASTER, GTAG, MPI_COMM_WORLD, &status);

		// Solve the tridiagonal system in place; dgtsv overwrites b with x.
		long NRHS=1, info;
		info = dgtsv(matrixSize, NRHS, ld->mat[0], md->mat[0], ud->mat[0], b->mat[0], matrixSize);		
		calculationsSuccess = ! info; // LAPACK convention: info == 0 means success
							// 5. send the result vector 
			//1. Sending the result FLAG
		MPI_Send(&calculationsSuccess, 1, MPI_INT, MASTER, Res_TAG, MPI_COMM_WORLD);
		if(calculationsSuccess)	// 2. Sending the B vector (now holds the solution)
			MPI_Send(b->mat[0], matrixSize, MPI_DOUBLE, MASTER, Res_TAG, MPI_COMM_WORLD);
		
		

		// delete the four internally created vectors
		delete md;
		delete ud;
		delete ld;
		delete b;


		// wait for another operation code
		MPI_Recv(&opCode, 1, MPI_INT, MASTER, OpCode_TAG, MPI_COMM_WORLD, &status);
	}	
}
/******************************************************************************/
// Solve the tridiagonal system A x = b entirely in this process (no MPI).
// On success the solution overwrites b in place (dgtsv writes x over b).
//
// Parameters:
//   triD     - full tridiagonal matrix; diagonals are extracted here
//   b        - right-hand side, overwritten with the solution on success
//   taskname - task label (unused here)
//   size     - nominal problem size (unused; taken from triD instead)
// Returns 1 on success, 0 if diagonal extraction or dgtsv failed.
int calculateDGTSV_Locally(CMy_Matrix *triD, CMy_Matrix *b, char *taskname, int size=1)
{
	// Diagonal vectors allocated by getDiagonals(); owned and freed here.
	CMy_Matrix *mainDiag = NULL, *lowerDiag = NULL, *upperDiag = NULL;

	int solveOk = 0; // pessimistic default: report failure
	int order = triD->getRowCount();

	if (triD->getDiagonals(mainDiag, lowerDiag, upperDiag) == 1)
	{
		long nrhs = 1;
		long info = dgtsv(order, nrhs,
		                  lowerDiag->mat[0], mainDiag->mat[0], upperDiag->mat[0],
		                  b->mat[0], order);
		// LAPACK convention: info == 0 signals success.
		solveOk = (info == 0);
	}

	// Release the three vectors created by getDiagonals().
	delete mainDiag;
	delete upperDiag;
	delete lowerDiag;

	return solveOk;
}
/******************************************************************************/
int main(int argc,char *argv[])
{	
	CMPI_Initialise(argc, argv);

	int opCode, slaveID=1;

	double startTime_MPI=0.0, endTime_MPI;

	time_t startTime_Loc, endTime_Loc;

	CMy_Matrix *triD, *b, *b2; // Tridiagonal matrix

	if(commRank != 0)
		calculateDGTSV_Slave();


	if(commRank == 0) // I am MASTER	
	{		
		char taskName[80], tmp[10], operationMode; 
		// operationMode = 0 --> local
		// operationMode = 1 --> MPI

		int taskMatrixSize, batchSize, i=1;
		ifstream f("C:\\Work\\Developping\\TestLAPack\\Release\\tasks.batch.txt");

		startTime_Loc = time (NULL);
		startTime_MPI = MPI_Wtime();


		
		// get the Operation Mode
		f.getline(&operationMode, 80);

		if(operationMode == '0')
			cout <<"\n Operation Mode: Local Execution";
		else
			cout <<"\n Operation Mode: MPI Execution (One Master and one Slave)";

		
		// get the batch size
		f.getline(tmp, 80);
		batchSize = CMy_Math_Base::myAtoi(tmp)-1; // I already have a problem including the stdlib.h

		while(!f.eof())
		{
			f.getline(taskName, 80);
			f.getline(tmp, 80);
			taskMatrixSize = CMy_Math_Base::myAtoi(tmp); // I already have a problem including the stdlib.h
						
			cout << "\n " << processorName << " Initiated a Taks " << " (" << i << " of " << batchSize << " ) "
				 <<"\n Task Name: " << taskName
				 << "\n Task matrix size " << taskMatrixSize << " * " << taskMatrixSize << flush;

			
			
			triD = new CMy_Matrix(taskMatrixSize, taskMatrixSize, "TriDiagonal Matrix");
			triD->randomiseTriDiagonalMatrix();
			b = new CMy_Matrix(1, triD->getColCount(), "B Vector");
			b->randomiseMatrix();			
			b2 = b->clone();// another copy of the B Vector

			
			
			if( operationMode == '0') ////   Run the task locally
			{
				if (calculateDGTSV_Locally(triD, b2, taskName, taskMatrixSize))
				{
					//endTime_Loc = time (NULL);
					//cout << "\n Task successfully finished (Locally) in = " << (endTime_Loc - startTime_Loc);
					cout << "\n Task successfully finished (Locally) ";
				}
				else
					cout << "\n Task UN-successfully finished (Locally)";
			}
			else ////   Run the task using MPI
			{			
				if (calculateDGTSV_Master(triD, b, taskName, slaveID, taskMatrixSize))
				{
					//endTime_MPI = MPI_Wtime();
					//cout << "\n Task successfully finished (MPI-Based) in = " << endTime_MPI - startTime_MPI;
					cout << "\n Task successfully finished (MPI-Based) ";
				}
				else
					cout << "\n Task UN-successfully finished (MPI-Based)";
			}
			cout << "\n ======================================="<< flush;		
			
			delete triD;
			delete b2;			
			delete b;

			//slaveID++;
			i++; // next task in the batch
		}

		endTime_Loc = time (NULL);
		endTime_MPI = MPI_Wtime();


		if(operationMode == '0')
			cout << "\n Batch File Finished (Locally  ) in = " << endTime_Loc - startTime_Loc << " Seconds";
		else
			cout << "\n Batch File Finished (MPI-Based) in = " << endTime_MPI - startTime_MPI << " Seconds";


		f.close();

		// end slave 1
		opCode = 0;
		MPI_Send(&opCode, 1, MPI_INT, 1 , OpCode_TAG, MPI_COMM_WORLD);
	}

	CMPI_Finalise();
	cout /*<< "\n Finalised" */<< flush;
	return 1;
}