//#include "StdAfx.h"
#include "UPO_solver.h"
#include "mpi.h"
#include <sstream>


using namespace std;


#define CHECK_MSG_AMOUNT  150
#define SEND_BEST_MASK	  100

#define MSG_FINAL_MASK	 1005
#define MSG_NODES_MASK	 1006
#define MSG_WORKSEND	 1007
#define MSG_TOKEN        1008
#define MSG_FINISH       1009
#define MSG_WORKREFUSE	 1010
#define MSG_WORKASK		 1011
#define MSG_BESTMASK	 1012

#define BUFFER_SIZE		 1024






UPO_solver::UPO_solver(InputData* _inputMatrix)
	: inputMatrix(NULL),
	CurrentNodesMask(NULL),
	m_bSolvingFinished(false),
	m_bTokenSend(false),
	workReqSend(false),
	msgBuffer(NULL)
{
	inputMatrix = _inputMatrix;

	const int dim = inputMatrix->getMatrixDimension();

	// The trivial cover that keeps every node is always valid, so it serves
	// as the initial upper bound for the branch-and-bound search.
	currentBestSolution.activeNodes = dim;
	currentBestSolution.solutionMask = new bool[dim];

	// The working mask starts with no node selected.
	CurrentNodesMask = new bool[dim];

	for (int node = 0; node < dim; node++)
	{
		currentBestSolution.solutionMask[node] = true;
		CurrentNodesMask[node] = false;
	}

	// Shared scratch buffer used by every buffered MPI_Send/MPI_Recv.
	msgBuffer = new char[BUFFER_SIZE];

	// Work requests start at our own rank; AskForWork() advances past it
	// before the first real request goes out.
	int myRank;
	MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
	workReqTarget = myRank;

	// Every rank except 0 expects its first work unit from rank 0, so it
	// behaves as if a request were already outstanding.
	if (myRank != 0)
		workReqSend = true;

	// Reset all statistics counters.
	memset(&stats, 0, sizeof(stats));
}

UPO_solver::~UPO_solver()
{
	// Release the best-solution mask allocated in the constructor.
	if (currentBestSolution.solutionMask != NULL)
		delete [] currentBestSolution.solutionMask;

	// NOTE(review): this should free the global solution mask, but the
	// original author reported a crash here (possibly a double delete by
	// another owner) and no leak with it disabled — leaving it commented
	// out until the real owner of this buffer is confirmed.
	//if (CurrentNodesMask != NULL)
	//	delete [] CurrentNodesMask;

	// BUG FIX: the original loop `for (i = 0; i < MainStack.size(); i++)`
	// only popped half of the elements, because size() shrank while i grew.
	// SystemState items are held by value, so popping is all the cleanup
	// they need.
	while (!MainStack.empty())
		MainStack.pop();

	delete [] msgBuffer;
}

void UPO_solver::WriteSolutionMask()
{
	// Print the best solution mask as space-separated 0/1 flags, one per node.
	const int dim = inputMatrix->getMatrixDimension();

	for (int node = 0; node < dim; node++)
		cout << currentBestSolution.solutionMask[node] << " ";

	cout << endl;
}

// Main driver: alternate between working the local DFS stack, running the
// ring termination-detection protocol (started by rank 0), and asking peers
// for work, until m_bSolvingFinished is set (by MSG_FINISH, or directly in
// SendToken() for a single-process run). Afterwards drain leftover messages
// and reduce the per-process best solutions down to rank 0.
void UPO_solver::Solve()
{
	int iPs;
	MPI_Comm_size(MPI_COMM_WORLD, &iPs);	// NOTE(review): iPs is unused here

	while(m_bSolvingFinished == false)
	{
		if (MainStack.empty() == false)
		{
			//cerr << "Process " << GetRank() << ": Starting to work on stack of size " << MainStack.size() << endl;
		}

		// Run DFS until the local stack is exhausted.
		ProcessStack();
		
		// Only rank 0 originates the termination token, and only when no
		// token of ours is already circulating.
		if (m_bTokenSend == false && GetRank() == 0)
		{
			SendToken();
			m_bTokenSend = true;
		}

		// Stack is empty at this point: try to steal work, then service
		// incoming messages (which may refill MainStack or finish us).
		AskForWork();
		CheckMessageQueue();
	}


	
	EmptyQueue();
	MakeParallelReduction();
}

// Round-robin work stealing: when our stack runs dry and no request is
// pending, ask the next rank (skipping ourselves, wrapping at the end)
// for a piece of its stack.
void UPO_solver::AskForWork()
{
	int processCount;
	MPI_Comm_size(MPI_COMM_WORLD, &processCount);

	// Running alone: nobody to ask.
	if (processCount == 1)
		return;

	// Only ask when the stack is empty and no request is outstanding.
	if (!MainStack.empty() || workReqSend)
		return;

	workReqSend = true;
	int myRank = GetRank();

	// Advance the target, stepping over our own rank and wrapping around.
	workReqTarget++;
	if (workReqTarget == myRank)
		workReqTarget++;

	if (workReqTarget >= processCount)
	{
		workReqTarget = 0;
		if (workReqTarget == myRank)
			workReqTarget++;
	}

#if ALL_OUTPUTS_OFF 
	cerr << "Process " << myRank << ": sending work request to: " << workReqTarget << endl;
#endif
	stats.workRequestSend++;

	MPI_Send (&myRank, 1, MPI_INT, workReqTarget, MSG_WORKASK, MPI_COMM_WORLD);
}

// Final reduction after termination: find the globally smallest cover.
// Protocol: MPI_Reduce(MIN) of each rank's best size to rank 0; rank 0 then
// broadcasts either -1 (it already owns the winning mask) or the winning
// size, in which case the rank whose best matches sends its mask with tag
// MSG_FINAL_MASK.
// NOTE(review): if several ranks tie on the winning size they all send a
// mask but rank 0 receives only one — the extra sends are never matched;
// confirm whether ties can occur in practice.
void UPO_solver::MakeParallelReduction()
{
	//cerr << "Process " << GetRank() << ": ***** Mask ***** " << endl;
	//WriteSolutionMask();
	
	MPI_Barrier (MPI_COMM_WORLD);
	
	int messageIn = currentBestSolution.activeNodes;
	int messageOut = 0;
#if ALL_OUTPUTS_OFF 
	cerr << "Process " <<	GetRank() << ": got: " << messageIn << endl;
#endif
	/* make reduction - find the MINIMUM of all ranks' best sizes at process 0 */
	MPI_Reduce (&messageIn, &messageOut, 1, MPI_INT, MPI_MIN, 0, MPI_COMM_WORLD);
	
	
	if (GetRank() == 0)
	{
		if (messageOut == currentBestSolution.activeNodes)
		{
			// Rank 0 already holds a mask of winning size: tell everyone
			// (-1) that no mask transfer is needed.
			messageOut = -1;
#if ALL_OUTPUTS_OFF 
			cerr << "Process " << GetRank() << ": Sending broadcast with value " << messageOut << endl;
#endif
			MPI_Bcast(&messageOut, 1, MPI_INT, 0, MPI_COMM_WORLD);

			//cerr << "Process " << GetRank() << ": After reduction is best found solution " << endl;
			//cerr << "*************************" << endl;
			//WriteSolutionMask();
			//cerr << "*************************" << endl;
		}
		else
		{
#if ALL_OUTPUTS_OFF 
			cerr << "Process " << GetRank() << ": Sending broadcast with value " << messageOut << endl;
#endif
			// Announce the winning size; the owning rank replies with its mask.
			MPI_Bcast(&messageOut, 1, MPI_INT, 0, MPI_COMM_WORLD);
		
			bool *mask;
			//char buf[BUFFER_SIZE];
			MPI_Status status;
			
			MPI_Recv(msgBuffer, BUFFER_SIZE, MPI_CHAR, MPI_ANY_SOURCE, MSG_FINAL_MASK, MPI_COMM_WORLD, &status);
			
	
			mask = DeserializeBestMask(msgBuffer);

			
			
			currentBestSolution.CopyMask(mask,inputMatrix->getMatrixDimension());

			delete [] mask;
		}

		cerr << "Process " << GetRank() << ": After reduction is best found solution " << endl;
		cerr << "*************************" << endl;
		WriteSolutionMask();
		cerr << "*************************" << endl;
	}
	else
	{
		// Non-root: learn whether a transfer is needed and whether we won.
		MPI_Bcast(&messageOut, 1, MPI_INT, 0, MPI_COMM_WORLD);
#if ALL_OUTPUTS_OFF 
		cerr << "Process " << GetRank() << ": Broadcast recieved, msgout  = " << messageOut << endl;
#endif
		if (messageOut == currentBestSolution.activeNodes)
		{			
			char *bestMask;
			
			bestMask = SerializeBestMask();
			MPI_Send (bestMask, inputMatrix->getMatrixDimension(), MPI_CHAR, 0, MSG_FINAL_MASK, MPI_COMM_WORLD);
			cerr << "Process " << GetRank() << ": Broadcast recieved and my mask send" << endl;
			
			cerr << "Process " << GetRank() << ": After reduction is best found solution " << endl;
			cerr << "*************************" << endl;
			WriteSolutionMask();
			cerr << "*************************" << endl;
			
			delete [] bestMask;
		}
	}
}

void UPO_solver::SplitWork()
{
	int totalNodes;
	// Empty MainStack if there is something left
	for (unsigned int i = 0; i < MainStack.size(); i++)
	{
		MainStack.pop();
	}

	// Get total number of nodes
	totalNodes = inputMatrix->getMatrixDimension();

	// Create first root state to start DFS
	SystemState state;
	state.totalNodes = totalNodes;
	state.coveredLines = 0;
	state.indexToNodes = 0;
	state.targetNode = -1;

	// Add root to stack
	MainStack.push(state);
	
	//return;

	int iPs = 3;
	MPI_Comm_size(MPI_COMM_WORLD, &iPs);

	while((int)MainStack.size() < iPs && (int)MainStack.size() > 0 )
	{
		SystemState actualState = MainStack.top();
		
		// Remove top from stack
		MainStack.pop();
		ProcessStackItem(actualState);
	}

	if (MainStack.size() == iPs)
	{
		for (int i = 1; i < iPs; i++)
		{
			SystemState state = MainStack.top();
			MainStack.pop();
			std::stack<SystemState> st;
			st.push(state);
#if ALL_OUTPUTS_OFF 
			cerr << "Process " << GetRank() << ": sending starting data to process: " << i << endl;
#endif
			SerializeStack(st);
			MPI_Send (msgBuffer, BUFFER_SIZE, MPI_PACKED, i, MSG_WORKSEND, MPI_COMM_WORLD);
			
			CurrentNodesMask[state.targetNode] = false;
			
			//sleep(1);
		}
	}
	else
	{

	}

}

// Temporary split work
// Enable/disable it in PAR.cpp for all processes
// Temporary split work
// Enable/disable it in PAR.cpp for all processes
//
// Rank 0 sends each other rank one starting state (the i-th node after the
// root); rank 0 itself keeps a root-like state. Each non-root rank then
// initializes its mask and coveredLines for the node it was given.
// NOTE(review): the state is transmitted as 4 MPI_INTs, which assumes
// SystemState is laid out as four contiguous ints — confirm on new platforms.
void
UPO_solver::DivideJob() 
{
	int my_rank;
	int tag = 1;
	int numPro;
	MPI_Status status;
	int totalNodes;
	static SystemState state;
	
	totalNodes = inputMatrix->getMatrixDimension();
	
	// we will send the next node after the root
	state.totalNodes = totalNodes;
	state.coveredLines = 0;
	state.indexToNodes = 1;
	state.targetNode = 0;

	/* get my rank */
	MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
  	/* find out number of processes */
	MPI_Comm_size(MPI_COMM_WORLD, &numPro);
	
	if (my_rank == 0) {								
		for (int source = 1; source < numPro; source++) {		
			MPI_Send(&state, 4, MPI_INT, 
				source, tag, MPI_COMM_WORLD);
			
			state.indexToNodes++; /* next nodes after the root (vertically) */
			state.targetNode++;
		}
		
		/* !!! instead of the root, rank 0 should hold all remaining nodes following the root */
		/* the last state must be set back to a classic root (targetNode == -1) */
		state.indexToNodes = numPro - 1;
		state.targetNode = -1;		

	} 
	else {
		MPI_Recv(&state, 4, MPI_INT, MPI_ANY_SOURCE, 
			MPI_ANY_TAG, MPI_COMM_WORLD, &status);
	}
	
	/* each non-root rank must initialize its mask and coveredLines */
	if (my_rank != 0) {
		CurrentNodesMask[state.targetNode] = true;

		// coveredLines = number of edges from targetNode to unselected nodes.
		for (int i = 0; i < totalNodes; i++ )
		{				
			if (inputMatrix->matrix[state.targetNode][i] == 1
				&& CurrentNodesMask[i] == false)
			{
				state.coveredLines++;
			}
		}
	}
	
	MainStack.push(state);
}


// Discard every state left on the main DFS stack.
void UPO_solver::EmtyStack()
{
	// BUG FIX: the original loop `for (i = 0; i < MainStack.size(); i++)`
	// compared a growing index against the shrinking size(), so it removed
	// only half of the items. Drain until empty instead.
	while (!MainStack.empty())
		MainStack.pop();
}


// Expand one DFS state: try adding the node at the state's cursor
// (indexToNodes) to the current cover, pruning branches that cannot beat
// currentBestSolution, and maintaining CurrentNodesMask as the set of
// nodes selected along the current DFS path.
void UPO_solver::ProcessStackItem(SystemState actualState)
{
	int iRow;
	int totalNodes = inputMatrix->getMatrixDimension();
	
	
	// Node this expansion will try; advance the state's cursor past it.
	iRow = actualState.indexToNodes;
	actualState.indexToNodes++;

	// Prune: no nodes left to try, or the current selection already has
	// best-1 nodes so it cannot improve on the best solution.
	if (actualState.indexToNodes > totalNodes || GetActiveNodesNumber(CurrentNodesMask,totalNodes) >= currentBestSolution.activeNodes-1)
	{
		/* targetNode of root is -1 */
		if (actualState.targetNode == -1)
			return;
			
		CurrentNodesMask[actualState.targetNode] = false; // Backtrack: clear this state's node from the working mask
		return;
	}
	// Re-push the parent so its remaining siblings get explored later.
	MainStack.push(actualState);

	// Generate a child state that includes node iRow.
	bool bPushState = true;
	SystemState newState;
	newState.CopyState(actualState); // Child starts from the parent's counters
	newState.targetNode = iRow;

	CurrentNodesMask[iRow] = true; // Set flag of this node to true in nodes mask (=we added this node)
	
	// Count the edges newly covered by iRow (edges to unselected neighbours).
	for (int i = 0; i < totalNodes; i++ )
	{				
		if (inputMatrix->matrix[iRow][i] == 1
			&& CurrentNodesMask[i] == false)
		{
			newState.coveredLines++;
			// All edges covered: the current mask is a complete cover and —
			// thanks to the pruning above — better than the previous best.
			if (newState.coveredLines == inputMatrix->getEdgesNumber())
			{
				currentBestSolution.CopyMask(CurrentNodesMask,totalNodes);
				currentBestSolution.activeNodes = GetActiveNodesNumber(CurrentNodesMask,totalNodes); 
				bPushState = false;
			}
		}
	}

	// Keep exploring the child only while it can still beat the best solution.
	if (bPushState == true && GetActiveNodesNumber(CurrentNodesMask,totalNodes) < currentBestSolution.activeNodes - 1)
	{
		MainStack.push(newState);
	}
	else
	{	
		// Dead end or complete solution: undo iRow's mask bit (backtrack).
		CurrentNodesMask[iRow] = false;
	}
}


void UPO_solver::ProcessStack()
{
	static int sendMask = 0;
				
	while(!MainStack.empty()) // While we have something on stack
	{	
		
		if (sendMask == SEND_BEST_MASK) {
			//SendBestMask();
			sendMask = 0;	
		}
		sendMask++;
		
		CheckMessageQueue();
		
		// Get top of the stack
		SystemState actualState = MainStack.top();

		if (MainStack.size() > stats.maxStackSize)
			stats.maxStackSize = MainStack.size();
		
		// Remove top from stack
		MainStack.pop();
		
		stats.expandNodes++;

		ProcessStackItem(actualState);
	}
}


// Start the termination-detection ring: pass a white (0) token to the next
// rank. With a single process there is no ring, so solving is simply done.
void UPO_solver::SendToken()
{
	int processCount;
	MPI_Comm_size(MPI_COMM_WORLD, &processCount);

	if (processCount == 1)
	{
		m_bSolvingFinished = true;
		return;
	}

	int token = 0;	// 0 = white token

	int dest = GetRank() + 1;
	if (dest > processCount - 1)
		dest = 0;
#if ALL_OUTPUTS_OFF 
	cerr << "Process " << GetRank() << ": Sending message MSG_TOKEN with value " << token << ", to process " << dest << endl;
#endif
	stats.msgToken++;
	MPI_Send (&token, 1, MPI_INT, dest, MSG_TOKEN, MPI_COMM_WORLD);
}
// Flatten the best-solution mask into a heap-allocated byte array
// (raw byte 1 = node selected, 0 = not selected) sized by the matrix
// dimension, suitable for an MPI_CHAR send. Caller owns the result.
char* UPO_solver::SerializeBestMask()
{
	int dim = inputMatrix->getMatrixDimension();
	char *serialized = new char[dim];

	for (int i = 0; i < dim; i++)
		serialized[i] = currentBestSolution.solutionMask[i] ? 1 : 0;

	return serialized;
}

// Rebuild a heap-allocated bool mask from a serialized mask buffer.
// Caller owns the result.
//
// BUG FIX: SerializeBestMask() writes raw bytes 1/0, but this routine used
// to test for the ASCII character '1' (0x31), so every mask received via
// MSG_FINAL_MASK decoded as all-false. Accept both encodings (the character
// form '1'/'0' is what SerializeStack() uses) so either producer decodes
// correctly.
bool* UPO_solver::DeserializeBestMask(char *str)
{	
	int dim = inputMatrix->getMatrixDimension();
	bool *mask = new bool[dim];
	
	for (int i = 0; i < dim; i++)
		mask[i] = (str[i] == 1 || str[i] == '1');
	
	return mask;
}

/*
 *	Serialization structure
 *	0) Number of nodes
 *	1) size of stack
 *	2) stack items
 *		2.1) coveredLines
 *		2.2) indexToNodes
 *		2.3) targetNode
 *		2.4) totalNodes
 *  3) Current node mask
 *	***4) Current best solution 
 */
/*
 *	Serialization structure
 *	0) Number of nodes
 *	1) size of stack
 *	2) stack items (top first)
 *		2.1-2.4) coveredLines, indexToNodes, targetNode, totalNodes
 *  3) Current node mask as '0'/'1' characters
 */
void 
UPO_solver::SerializeStack(std::stack<SystemState> stack)
{	
	int numberOfNodes = inputMatrix->getMatrixDimension();
	int stackSize = stack.size();
	int position = 0;

	memset(msgBuffer, 0, BUFFER_SIZE);

	MPI_Pack(&numberOfNodes, 1, MPI_INT, msgBuffer, BUFFER_SIZE, &position, MPI_COMM_WORLD);
	MPI_Pack(&stackSize, 1, MPI_INT, msgBuffer, BUFFER_SIZE, &position, MPI_COMM_WORLD);

	// NOTE(review): each state is packed as 4 consecutive MPI_INTs, which
	// assumes SystemState is laid out as four contiguous ints — confirm on
	// new platforms.
	while (!stack.empty()) {
		SystemState state = stack.top();
		stack.pop();
		MPI_Pack(&state, 4, MPI_INT, msgBuffer, BUFFER_SIZE, &position, MPI_COMM_WORLD);
	}

	// Encode the working mask as text so DeserializeStack can test for '1'.
	char *maskChars = new char[numberOfNodes];

	for (int i = 0; i < numberOfNodes; i++)
		maskChars[i] = CurrentNodesMask[i] ? '1' : '0';

	MPI_Pack(maskChars, numberOfNodes, MPI_CHAR, msgBuffer, BUFFER_SIZE, &position, MPI_COMM_WORLD);

	delete [] maskChars;
}

std::stack<SystemState> 
UPO_solver::DeserializeStack(void)
{
	stack<SystemState> stack;
	int numberOfNodes;
	int stackSize;
	int position = 0;
	char *mask;

	MPI_Unpack(msgBuffer, BUFFER_SIZE, &position, &numberOfNodes, 1, MPI_INT, MPI_COMM_WORLD);
	MPI_Unpack(msgBuffer, BUFFER_SIZE, &position, &stackSize, 1, MPI_INT, MPI_COMM_WORLD);
	
#if 0
	fprintf(stderr, "Process %d: DeserializeStack\n", GetRank());
	for ( int i = 0; i < 50; i++)
		fprintf(stderr, "%x", msgBuffer[i]);
	fprintf(stderr, "\n");	
#endif	
	
	for (int i = 0; i < stackSize; i++) {
		SystemState state;
		MPI_Unpack(msgBuffer, BUFFER_SIZE, &position, &state, 4, MPI_INT, MPI_COMM_WORLD);
		stack.push(state);
	}
	
#if 0
	SystemState state = stack.top();
	cerr << "Process " << GetRank() <<": Data " << numberOfNodes << "," <<stackSize << ","
		<<state.indexToNodes<<","
		<<state.coveredLines<<","
		<<state.totalNodes<<","
		<<state.targetNode<<endl;
#endif
	
	mask = new char[numberOfNodes];
	MPI_Unpack(msgBuffer, BUFFER_SIZE, &position, mask, numberOfNodes, MPI_CHAR, MPI_COMM_WORLD);


	for (int i = 0; i < numberOfNodes; i++) {
		if (mask[i] == '1')
			 CurrentNodesMask[i] = true;
		else
			CurrentNodesMask[i] = false;
	}

	delete [] mask;
	
	return stack;
}


void UPO_solver::CheckMessageQueue()
{

	static int counter = 0;	
	counter++;
	if (counter == CHECK_MSG_AMOUNT)
	{
		int flag;
		MPI_Status status;
		char message[BUFFER_SIZE];
		std::stringstream str;
		std::stringstream str2;		
		std:string string;
		int token;
		int tmp;
		
		counter = 0;

		//cerr << "Process " << GetRank() << ": Checking message queue" << endl;

		MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &flag, &status);

		while(flag == 1)
		{
			flag = 0;
			//cerr << "Process " << GetRank() << ": Message is waiting" << endl;

			int dest;
			int iPs;

			switch (status.MPI_TAG) 
			{
				case MSG_TOKEN:

					MPI_Recv(&token, 1, MPI_INT, MPI_ANY_SOURCE, MSG_TOKEN, MPI_COMM_WORLD, &status);


					//cerr << "Process " << GetRank() << ": Recieved message MSG_TOKEN with value " << token << endl;

					// 0 means token is white, so if it is white look to my stack, and is not empty make ot black
					if (token == 0)
						token = MainStack.empty() ? 0 : 1;
					

					if (GetRank() == 0)
					{

						if (token == 0)
						{
							int xx = 0;
							MPI_Comm_size(MPI_COMM_WORLD, &iPs);
							for (int i = 1; i < iPs; i++)
							{
								//cerr << "Process " << GetRank() <<  ": Sending message MSG_FINISH to process " << i << endl;
								MPI_Send (&token, 1, MPI_INT, i, MSG_FINISH, MPI_COMM_WORLD);
								//cerr << "Process " << GetRank() <<  ": Sent message MSG_FINISH to process " << i << endl;
							}
#if ALL_OUTPUTS_OFF 
							cerr << "Process " << GetRank() << ": Sent all FINAL tokens" << endl;
#endif
							m_bSolvingFinished = true;
						}
						else
						{
							if (MainStack.empty())
							{
								token = 0;
								iPs = 2;
								MPI_Comm_size(MPI_COMM_WORLD, &iPs);
								dest = GetRank() + 1;
								if (dest > (iPs-1))
									dest = 0;

								//cerr << "Process " << GetRank() << ": Sending message MSG_TOKEN with value " << token 
								//	<< ", to process " << dest << endl;

								stats.msgToken++;
								MPI_Send (&token, 1, MPI_INT, dest, MSG_TOKEN, MPI_COMM_WORLD);
								
								//cerr << "Process " << GetRank() <<  ": Sent message MSG_TOKEN to process " << dest << endl;
							}
							else
							{
								m_bTokenSend = false;
							}
							//m_bSolvingFinished = true;

						}
						
					}
					else
					{
						MPI_Comm_size(MPI_COMM_WORLD, &iPs);
						dest = GetRank() + 1;
						if (dest > (iPs-1))
							dest = 0;

						//cerr << "Process " << GetRank() << ": Sending message MSG_TOKEN with value " << token << ", to process " << dest << endl;
						
						stats.msgToken++;
						MPI_Send (&token, 1, MPI_INT, dest, MSG_TOKEN, MPI_COMM_WORLD);

						//if (token == 0)
						//m_bSolvingFinished = true;
					}
					
					break;


				case MSG_WORKSEND:
					MPI_Recv(msgBuffer, BUFFER_SIZE, MPI_PACKED, MPI_ANY_SOURCE, MSG_WORKSEND, MPI_COMM_WORLD, &status);

					stats.workReceive++;
					MainStack = DeserializeStack();
#if ALL_OUTPUTS_OFF 	
					cerr << "Process " << GetRank() << ": Recieved work, " << " new stack size: " << MainStack.size()  << endl;
#endif	
					workReqSend = false;
					
					break;
					
				case MSG_WORKREFUSE:
					MPI_Recv(&tmp, 1, MPI_INT, MPI_ANY_SOURCE, MSG_WORKREFUSE, MPI_COMM_WORLD, &status);
					stats.workRequestRefuse++;
#if ALL_OUTPUTS_OFF 				
					cerr << "Process " << GetRank() << ": Recieved work refuse" << endl;
#endif
					if (workReqSend == false) {
						cerr << "ERROR: P" << GetRank() << ": Recieved work refuse but did't ask for work" << endl;
					}
					
					workReqSend = false;
					
					break;
					
				case MSG_WORKASK:
					int my_rank;
					my_rank = GetRank();
					
					stats.workRequestReceive++;
					MPI_Recv(&tmp, 1, MPI_INT, MPI_ANY_SOURCE, MSG_WORKASK, MPI_COMM_WORLD, &status);
#if ALL_OUTPUTS_OFF 	
					cerr << "Process " << GetRank() << ": recieved MSG_WORKASK from " << tmp << endl;
#endif
					if (MainStack.size() > 2)
					{
						SendWork(tmp);
					}
					else
					{
						MPI_Send (&my_rank, 1, MPI_INT, tmp, MSG_WORKREFUSE, MPI_COMM_WORLD);
					}
					break;
#if 0	
				case MSG_BESTMASK:
					MPI_Recv(msgBuffer, inputMatrix->getMatrixDimension(), MPI_CHAR, MPI_ANY_SOURCE, 
						MSG_BESTMASK, MPI_COMM_WORLD, &status);
						
					ReceiveBestMask();
				
					break;
#endif

				case MSG_FINISH:
					MPI_Recv(message, 1, MPI_INT, MPI_ANY_SOURCE, MSG_FINISH, MPI_COMM_WORLD, &status);
					//cerr << "Process " << GetRank() << ": Recieved message MSG_FINISH" << endl;

					m_bSolvingFinished = true;
					break;
			}
			flag = 0;
			MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &flag, &status);
		}
	}

		
}

void UPO_solver::SendWork(int dest)
{
	std::stack<SystemState> st;
	std::stack<SystemState> st_tmp;
	std::string str;
	int totalNodes = inputMatrix->getMatrixDimension();
	int i;
	
	// Split stack
	int iSplit = (int) (MainStack.size() / 2);
	for (i = 0; i < iSplit; i++) {
		SystemState state = MainStack.top();
		MainStack.pop();
		//st_tmp.push(state);
		st.push(state);
		
	}

	//cerr << "Process " << GetRank() << ": Split = " << iSplit << endl;
	//cerr << "Process " << GetRank() << ": Sending Stack of size " << st.size() << " And in My stack size = " << MainStack.size() << endl;

	
	// Sort stack
	/*
	for (i = 0; i < (int) (MainStack.size() / 2); i++) {
		SystemState state = st_tmp.top();
		st_tmp.pop();
		st.push(state);
	}
	*/
	
	stats.workSend++;
	SerializeStack(st);
	MPI_Send (msgBuffer, BUFFER_SIZE, MPI_PACKED, dest, MSG_WORKSEND, MPI_COMM_WORLD);	
	
	////  Set up correct mask for actual stack

	for (int i = 0; i < st.size(); i++)
	{
		CurrentNodesMask[st.top().targetNode] = false;
		st.pop();
	}

	//SystemState state = MainStack.top();
	//for (i = state.indexToNodes; i < totalNodes; i++)
	//	CurrentNodesMask[state.targetNode] = false;
}

// Drain every pending MPI message after solving has finished, so no
// unmatched sends are left behind before the final reduction.
void UPO_solver::EmptyQueue()
{
	int flag;
	MPI_Status status;
	char message[BUFFER_SIZE];
	int tmp;
	int token;


	MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &flag, &status);

	while( flag == 1)
	{
		switch (status.MPI_TAG) 
		{
			case MSG_TOKEN:
				MPI_Recv(&token, 1, MPI_INT, MPI_ANY_SOURCE, MSG_TOKEN, MPI_COMM_WORLD, &status);
				break;

			case MSG_WORKSEND:
				// FIX: pass the array itself, not its address — `&message`
				// has type char(*)[BUFFER_SIZE]; it happens to alias the same
				// address but is the wrong type for a buffer argument.
				MPI_Recv(message, BUFFER_SIZE, MPI_CHAR, MPI_ANY_SOURCE, MSG_WORKSEND, MPI_COMM_WORLD, &status);
				break;
				
			case MSG_WORKREFUSE:
				MPI_Recv(&tmp, 1, MPI_INT, MPI_ANY_SOURCE, MSG_WORKREFUSE, MPI_COMM_WORLD, &status);	
				break;
				
			case MSG_WORKASK:
				MPI_Recv(&tmp, 1, MPI_INT, MPI_ANY_SOURCE, MSG_WORKASK, MPI_COMM_WORLD, &status);
				break;

			case MSG_FINISH:
				MPI_Recv(&tmp, 1, MPI_INT, MPI_ANY_SOURCE, MSG_FINISH, MPI_COMM_WORLD, &status);
				break;
		}
		MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &flag, &status);
	}
}

#if 0
// DISABLED (inside #if 0): broadcast our current best mask to every other
// rank with tag MSG_BESTMASK — skipped entirely when the mask is still the
// trivial all-ones solution.
void
UPO_solver::SendBestMask() 
{
	int 	edges = inputMatrix->getMatrixDimension();
	char 	*mask;
	int		iPs;
	bool	send = false;
		
	// Dont send mask of 1's
	for (int i = 0; i < edges; i++) {
		if (currentBestSolution.solutionMask[i] == false)
			send = true;
	}
	
	if (!send)
		return;
	
	mask = SerializeBestMask();
	
	MPI_Comm_size(MPI_COMM_WORLD, &iPs);
	
	// Send to every rank except ourselves.
	for (int i = 0; i < iPs; i++) {
		if (i == GetRank())
			continue;
			
		MPI_Send (mask, edges, MPI_CHAR, i, MSG_BESTMASK, MPI_COMM_WORLD);
	}
}

// DISABLED (inside #if 0): decode a best mask received into msgBuffer and
// adopt it if it selects fewer nodes than our current best.
void
UPO_solver::ReceiveBestMask() 
{
	bool 	*mask;
	int 	new_count = 0;
	int 	cur_count = 0;
	int 	edges = inputMatrix->getMatrixDimension();
	
cerr << "P"<< GetRank() << "BESTMASK " << endl;

	mask = DeserializeBestMask(msgBuffer);
	
	// Which mask is better? (counts of UNSELECTED nodes — higher is better)
	for (int i = 0; i < edges; i++) {
		if (mask[i] == false)
			new_count++;
		if (currentBestSolution.solutionMask[i] == false)
			cur_count++;
	}
	
	// Take better one
	if (new_count > cur_count) {
		currentBestSolution.CopyMask(mask, edges);
		stats.newMaskTook++;
	}
	else {
		stats.newMaskRefuse++;
	}
		
	delete [] mask;
}
#endif

// Count how many entries of the mask are set, i.e. how many nodes are
// currently selected in the cover.
int UPO_solver::GetActiveNodesNumber(bool* mask, int iNodes)
{
	int count = 0;
	for (int i = 0; i < iNodes; i++)
		count += mask[i] ? 1 : 0;
	return count;
}

// Dump the per-process search statistics to stderr.
// NOTE(review): despite its name, output is produced only when
// ALL_OUTPUTS_OFF evaluates to non-zero — the macro's name is inverted
// relative to its effect here; confirm before renaming.
void 
UPO_solver::PrintStats()
{
#if ALL_OUTPUTS_OFF 
	//cerr << "--- Process " << GetRank() << " stats ---" << endl;
	cerr << GetRank()<< "expandNodes: " << stats.expandNodes << endl;
	cerr << GetRank()<< "maxStackSize: " << stats.maxStackSize << endl;
	cerr << GetRank()<< "workRequestSend: " << stats.workRequestSend << endl;
	cerr << GetRank()<< "workRequestRefuse: " << stats.workRequestRefuse << endl;
	cerr << GetRank()<< "workRequestReceive: " << stats.workRequestReceive << endl;
	cerr << GetRank()<< "workReceiv: " << stats.workReceive << endl;
	cerr << GetRank()<< "workSend: " << stats.workSend << endl;
	cerr << GetRank()<< "msgToken: " << stats.msgToken << endl;
	//cerr << GetRank()<< "newMaskTook: " << stats.newMaskTook << endl;
	//cerr << GetRank()<< "newMaskRefuse: " << stats.newMaskRefuse << endl;
	//cerr << "--------------------------------" << endl;
#endif
}



