#include "MpiSynchronization.h"

void sendPartialMartrices()
{
	//Scatters the global universe from the master (rank 0) to all processes.
	//Each process receives its share of rows plus the neighbouring "ghost"
	//rows needed to compute a game of life generation, and builds its
	//currentUniverse / nextUniverse partial matrices from them.
	vector<int>sendcounts(processes);
	vector<int>displs(processes);

	if (processes == 1)
	{
		//degenerate case: a single process owns the whole matrix and needs no
		//ghost rows (the generic code below would read displs[-1])
		displs[0] = 0;
		sendcounts[0] = rows * columns;
	}
	else
	{
		//the first process will receive rows / processes matrix rows plus the first row of the following process
		//to perform game of life operations
		displs[0] = 0;
		sendcounts[0] = (rows / processes + 1) * columns;

		//the middle processes will receive rows / processes matrix rows plus the last line of the previous process and the
		//first line of the next process; the displacement steps back two rows so
		//the ghost rows overlap the neighbours' data
		for (int i = 1; i < processes - 1; i++)
		{
			displs[i] = displs[i - 1] + sendcounts[i - 1] - 2 * columns;
			sendcounts[i] = (rows / processes + 2) * columns;
		}

		//the last process will receive rows / processes matrix rows, plus the remaining lines truncated by the integer division,
		//which are rows mod processes, plus the last line of the previous process.
		displs[processes - 1] = displs[processes - 2] + sendcounts[processes - 2] - 2 * columns;
		sendcounts[processes - 1] =  (rows / processes + rows % processes + 1) * columns;
	}

#ifdef MPI_DEBUG
	if (myrank == 0)
	{
		cout << "DISPLS: " << endl;
		for (int i = 0; i < processes; i++)
			cout << displs[i] << " ";
		cout << "\n";

		cout << "SENDCOUNTS: " << endl;
		for (int i = 0; i < processes; i++)
			cout << sendcounts[i] << " ";
		cout << "\n";
	}
#endif

	//initialized: MPI_Scatterv ignores sendbuf on non-root ranks, but passing
	//an uninitialized pointer value is undefined behavior
	unsigned char* sendbuf = 0;
	vector<unsigned char>recvbuf(sendcounts[myrank]);

	//the master retreives the buffer in which the matrix is stored
	if (myrank == 0)
	{
		sendbuf = globalUniverse ->bufferAddress();

#ifdef MPI_DEBUG
		cout << "sendbuf:" << endl;
		for (int i = 0; i < globalUniverse -> height() * globalUniverse -> width(); i++)
			cout << (int)sendbuf[i] << " ";
		cout << "\n";
#endif

	}

	MPI_Barrier(MPI_COMM_WORLD);
	MPI_Scatterv(sendbuf, &sendcounts[0], &displs[0], MPI_UNSIGNED_CHAR, &recvbuf[0], sendcounts[myrank], MPI_UNSIGNED_CHAR, 0, MPI_COMM_WORLD);

#ifdef MPI_DEBUG
	for (int i = 0; i < sendcounts[myrank]; i++)
		cout << "[" << myrank << "," << (int)recvbuf[i] << "]";
	cout << "\n";
#endif

	//each process creates two matrices, one storing the current status and one storing the new computed status.
	//NOTE(review): assumes SimpleMatrix copies the buffer it is given —
	//recvbuf is destroyed when this function returns; confirm against SimpleMatrix
	currentUniverse = new SimpleMatrix<unsigned char>(&recvbuf[0], sendcounts[myrank], columns);
	nextUniverse = new SimpleMatrix<unsigned char>(currentUniverse -> height(), currentUniverse -> width(), 0);

#ifdef	MPI_DEBUG
	//DEBUG
	cout << "\n\nProcess "<< myrank << " Partial Matrix: " << endl;
	currentUniverse -> printIndices();
#endif

	//(no explicit clear() needed: sendcounts, displs and recvbuf are released
	//automatically when they go out of scope)

	//the master clears the memory of the global universe since it is not needed anymore after this point
	if (myrank == 0)
	{
		delete globalUniverse;
	}

#ifdef MPI_DEBUG
	cout << "Process " << myrank << " exited sendPartialMatrix correctly" << endl;
#endif
}

void gatherPartialMatrices()
{
	//Gathers the partial matrices back onto the master (rank 0), which
	//rebuilds globalUniverse from them. Ghost rows are discarded: each rank
	//contributes only the rows it owns.
	vector<int>recvcounts(processes);
	vector<int>displs(processes);
	unsigned char* sendbuf = currentUniverse -> bufferAddress();
	vector<unsigned char>recvbuf;

	if (processes == 1)
	{
		//degenerate case: the single process owns every row (the generic code
		//below would read displs[-1])
		displs[0] = 0;
		recvcounts[0] = rows * columns;
	}
	else
	{
		//each process receives rows / processes matrix rows (we discard the additional rows used during the computation because
		//they have been updated by other processes)
		displs[0] = 0;
		recvcounts[0] = (rows / processes) * columns;

		for (int i = 1; i < processes - 1; i++)
		{
			displs[i] = displs[i - 1] + recvcounts[i - 1];
			recvcounts[i] = rows / processes * columns;
		}

		//the last process receive rows / processes matrix rows plus those truncated by the previous integer division. We discard
		//the row used during the computation as in the previous case.
		displs[processes - 1] = displs[processes - 2] + recvcounts[processes - 2];
		recvcounts[processes - 1] = (rows / processes + rows % processes) * columns;
	}

#ifdef MPI_DEBUG
	cout << "DISPLS: " << myrank << endl;
	for (int i = 0; i < processes; i++)
		cout << displs[i] << " ";
	cout << endl;

	cout << "RECVCOUNTS: " << myrank << endl;
	for (int i = 0; i < processes; i++)
		cout << recvcounts[i] << " ";
	cout << endl;
#endif

	if (myrank == 0)
		recvbuf.resize(rows * columns);

	//on non-root ranks recvbuf stays empty, and &recvbuf[0] on an empty
	//vector is undefined behavior — pass a null pointer instead (MPI_Gatherv
	//ignores the receive buffer on non-root ranks)
	unsigned char* gatherDst = recvbuf.empty() ? 0 : &recvbuf[0];

	MPI_Barrier(MPI_COMM_WORLD);

	//non-root ranks skip their leading ghost row (one full row = columns cells)
	MPI_Gatherv(sendbuf + (myrank == 0 ? 0 : columns), recvcounts[myrank], MPI_UNSIGNED_CHAR, gatherDst, &recvcounts[0], &displs[0], MPI_UNSIGNED_CHAR, 0, MPI_COMM_WORLD);

#ifdef MPI_DEBUG
	if (myrank == 0)
	{
		for (int i = 0; i < rows * columns; i++)
			cout << (int)recvbuf[i] << " ";
		cout << endl;
	}
#endif

	//partial matrices not needed after this point
	delete currentUniverse;
	delete nextUniverse;

	if (myrank == 0)
	{
		//NOTE(review): assumes SimpleMatrix copies the buffer it is given —
		//recvbuf is destroyed when this function returns; confirm against SimpleMatrix
		globalUniverse = new SimpleMatrix<unsigned char>(&recvbuf[0], rows * columns, columns);
		vector<char> symbols(2);
		vector<unsigned char> elements(2);
		symbols[0] = 'X';
		symbols[1] = 'O';
		elements[0] = 0;
		elements[1] = 1;

#ifdef MPI_DEBUG
		cout << "\n\nUniverse Matrix: \n" << endl;
		globalUniverse -> print(symbols, elements);
		cout << "\n\n *** End Universe Matrix *** \n" << endl;
		globalUniverse ->printIndices();
#endif
	}

	//(no explicit clear() needed: recvcounts, displs and recvbuf are released
	//automatically when they go out of scope)

#ifdef MPI_DEBUG
	cout << "Process " << myrank << " exited gatherPartialMatrixes correctly" << endl;
#endif

}

void updatePartialMatrices()
{
	//Exchanges ghost rows with neighbouring ranks after a generation step.
	//Each rank sends its outermost *owned* rows and overwrites its ghost rows
	//with the neighbours' data. Deadlock with blocking MPI_Send/MPI_Recv is
	//avoided by parity ordering: even ranks send first, odd ranks receive
	//first, and adjacent ranks always have opposite parity.

	//single process: no neighbours, nothing to exchange (rank 0 would
	//otherwise try to talk to a nonexistent rank 1)
	if (processes == 1)
		return;

	if (myrank == 0)
	{
		//first rank: only a bottom neighbour (rank 1, odd, so it receives first)
		MPI_Status status;
		vector<unsigned char> sendbuf = currentUniverse -> getRow(currentUniverse -> height() - 2);
		vector<unsigned char> recvbuf(currentUniverse -> width());
		MPI_Send(&sendbuf[0], sendbuf.size(), MPI_UNSIGNED_CHAR, 1, 0, MPI_COMM_WORLD);
		MPI_Recv(&recvbuf[0], recvbuf.size(), MPI_UNSIGNED_CHAR, 1, 1, MPI_COMM_WORLD, &status);
		currentUniverse -> setRow(recvbuf, currentUniverse -> height() - 1);
	}
	else if (myrank == processes - 1)
	{
		//last rank: only a top neighbour
		if (myrank % 2 == 0)
		{
			MPI_Status status;
			vector<unsigned char> sendbuf = currentUniverse -> getRow(1);
			vector<unsigned char> recvbuf(currentUniverse -> width());
			MPI_Send(&sendbuf[0], sendbuf.size(), MPI_UNSIGNED_CHAR, myrank - 1, myrank, MPI_COMM_WORLD);
			MPI_Recv(&recvbuf[0], recvbuf.size(), MPI_UNSIGNED_CHAR, myrank - 1, myrank - 1, MPI_COMM_WORLD, &status);
			currentUniverse -> setRow(recvbuf, 0);
		}
		else
		{
			MPI_Status status;
			vector<unsigned char> sendbuf = currentUniverse -> getRow(1);
			vector<unsigned char> recvbuf(currentUniverse -> width());
			MPI_Recv(&recvbuf[0], recvbuf.size(), MPI_UNSIGNED_CHAR, myrank - 1, myrank - 1, MPI_COMM_WORLD, &status);
			MPI_Send(&sendbuf[0], sendbuf.size(), MPI_UNSIGNED_CHAR, myrank - 1, myrank, MPI_COMM_WORLD);
			currentUniverse -> setRow(recvbuf, 0);
		}
	}
	else
	{
		//middle ranks: neighbours above (myrank - 1) and below (myrank + 1).
		//row 1 is the first owned row, height() - 2 the last owned row;
		//row 0 and height() - 1 are the ghost rows being refreshed.
		if (myrank % 2 == 0)
		{
			MPI_Status status;
			vector<unsigned char> sendbuf1 = currentUniverse -> getRow(1);
			vector<unsigned char> sendbuf2 = currentUniverse -> getRow(currentUniverse -> height() - 2);
			vector<unsigned char> recvbuf1(currentUniverse -> width());
			vector<unsigned char> recvbuf2(currentUniverse -> width());
			MPI_Send(&sendbuf1[0], sendbuf1.size(), MPI_UNSIGNED_CHAR, myrank - 1, myrank, MPI_COMM_WORLD);
			MPI_Send(&sendbuf2[0], sendbuf2.size(), MPI_UNSIGNED_CHAR, myrank + 1, myrank, MPI_COMM_WORLD);
			//fixed: each receive now uses its own buffer's size (the original
			//crossed recvbuf1.size()/recvbuf2.size(); harmless only because
			//both equal the matrix width)
			MPI_Recv(&recvbuf2[0], recvbuf2.size(), MPI_UNSIGNED_CHAR, myrank + 1, myrank + 1, MPI_COMM_WORLD, &status);
			MPI_Recv(&recvbuf1[0], recvbuf1.size(), MPI_UNSIGNED_CHAR, myrank - 1, myrank - 1, MPI_COMM_WORLD, &status);
			currentUniverse -> setRow(recvbuf1, 0);
			currentUniverse -> setRow(recvbuf2, currentUniverse -> height() - 1);
		}
		else
		{
			MPI_Status status;
			vector<unsigned char> sendbuf1 = currentUniverse -> getRow(1);
			vector<unsigned char> sendbuf2 = currentUniverse -> getRow(currentUniverse -> height() - 2);
			vector<unsigned char> recvbuf1(currentUniverse -> width());
			vector<unsigned char> recvbuf2(currentUniverse -> width());
			MPI_Recv(&recvbuf2[0], recvbuf2.size(), MPI_UNSIGNED_CHAR, myrank + 1, myrank + 1, MPI_COMM_WORLD, &status);
			MPI_Recv(&recvbuf1[0], recvbuf1.size(), MPI_UNSIGNED_CHAR, myrank - 1, myrank - 1, MPI_COMM_WORLD, &status);
			MPI_Send(&sendbuf1[0], sendbuf1.size(), MPI_UNSIGNED_CHAR, myrank - 1, myrank, MPI_COMM_WORLD);
			MPI_Send(&sendbuf2[0], sendbuf2.size(), MPI_UNSIGNED_CHAR, myrank + 1, myrank, MPI_COMM_WORLD);
			currentUniverse -> setRow(recvbuf1, 0);
			currentUniverse -> setRow(recvbuf2, currentUniverse -> height() - 1);
		}
	}

#ifdef MPI_DEBUG
	//debug-only display tables (moved inside the #ifdef — they were unused
	//locals in release builds)
	vector<char>symbols(2);
	vector<unsigned char>elements(2);
	symbols[0] = 'X';
	symbols[1] = 'O';
	elements[0] = 0;
	elements[1] = 1;
	cout << "\n\nProcess "<< myrank << " Partial Matrix: " << endl;
	currentUniverse ->print(symbols,elements);
#endif
}

void synchMatrixDimensions()
{
	if (myrank == 0)
	{
		rows = globalUniverse ->height();
		columns = globalUniverse ->width();
		for(int i = 1; i < processes; i++)
		{
			MPI_Send(&rows,1,MPI_INT,i,MSG_ROW,MPI_COMM_WORLD);
			MPI_Send(&columns,1,MPI_INT,i,MSG_COLUMN,MPI_COMM_WORLD);
		}
	}
	else
	{
		MPI_Status status;
		MPI_Recv(&rows,1,MPI_INT,0,MSG_ROW,MPI_COMM_WORLD,&status);
		MPI_Recv(&columns,1,MPI_INT,0,MSG_COLUMN,MPI_COMM_WORLD,&status);
	}

}