/*
 * ParallelCore.cpp
 *
 *  Created on: Oct 18, 2012
 *      Author: David Kadlecek, Ales Fiser
 */

#include "ParallelCore.h"

// Shared MPI state for this translation unit.
MPI_Status status;   // status of the most recently probed/received message
int my_rank;         // rank of this process in MPI_COMM_WORLD
int process_count;   // total number of processes in MPI_COMM_WORLD

// Token-based (Dijkstra-style) distributed termination bookkeeping.
int iHaveToken;                  // NOTE(review): never explicitly initialized; rank 0 sets it in doParallel() — other ranks rely on zero-initialization of globals
int waitForTaskResponse = false; // true while a MSG_WORK_REQUEST is outstanding
int isTokenDirty = false;        // token becomes dirty once work has been handed out

int shouldContinueSearch = true; // main loop flag; cleared on MSG_FINISH (or by rank 0 when a clean token returns)

/**
 * Constructs the core in its "nothing solved yet" state.
 * maxresult is allocated later by loadInputMatrix(); task by doParallel().
 */
ParallelCore::ParallelCore() :
		maxdepth(-1),
		maxresult(NULL),
		maxResultDepthGlobal(0),
		inputMatrix(NULL),
		task(NULL) {
}

/**
 * Releases owned resources.
 *
 * BUG FIX: maxresult is allocated with new[] in loadInputMatrix(), so it
 * must be released with delete[] — plain delete on an array is undefined
 * behavior. inputMatrix is deleted here too (ownership is taken in
 * loadInputMatrix — presumably intentional; confirm against callers).
 * NOTE(review): `task` (allocated in doParallel) is never freed here.
 */
ParallelCore::~ParallelCore() {
	delete[] maxresult;
	delete inputMatrix;
}

/**
 * Stores the adjacency-matrix holder and allocates the best-result buffer,
 * one slot per graph node, each initialized to EMPTY.
 */
void ParallelCore::loadInputMatrix(InputMatrixHolder* inputMatrix) {
	ParallelCore::inputMatrix = inputMatrix;
	const int nodeCount = ParallelCore::inputMatrix->getNumberOfNodes();
	maxresult = new int16_t[nodeCount];
	for (int node = 0; node < nodeCount; ++node) {
		maxresult[node] = EMPTY;
	}
}

/**
 * If the task's current depth beats the local best, snapshots its state
 * into maxresult; additionally broadcasts the new depth to all other
 * processes when it also beats the best known global depth.
 */
void ParallelCore::checkAndSaveMaximum(Task* task) {
	if (logBasicDebugCore) cout << "try save max, check depth " << task->getDepth() << endl;
	const int candidateDepth = task->getDepth();
	if (candidateDepth <= maxdepth) {
		return; // not better than what we already have
	}
	maxdepth = candidateDepth;
	if (logSavingMax) cout << my_rank << " saving maximum with size: " << maxdepth + 1 << endl;

	// Snapshot the task's current clique into the local best-result buffer.
	const int nodeCount = inputMatrix->getNumberOfNodes();
	int16_t* state = task->getActualState();
	for (int node = 0; node < nodeCount; ++node) {
		maxresult[node] = state[node];
	}
	// Tell everyone else only when we also beat the global best.
	if (maxdepth > maxResultDepthGlobal) {
		maxResultDepthGlobal = maxdepth;
		sendMyBestResultDepthToOthers();
	}
}

/**
 * Broadcasts the current global-best clique depth to every other process
 * as a MSG_MAX_DEPTH message (used by them for branch pruning).
 */
void ParallelCore::sendMyBestResultDepthToOthers() {
	for (int destination = 0; destination < process_count; ++destination) {
		if (destination == my_rank) {
			continue; // never send to ourselves
		}
		MPI_Send(&maxResultDepthGlobal, 1, MPI_INT, destination, MSG_MAX_DEPTH, MPI_COMM_WORLD );
	}
}

/**
 * Sends this process's best clique (the full maxresult array) to rank 0
 * as a MSG_FINAL_RESULT message.
 *
 * BUG FIX: the original allocated a `buffer` array that was never used
 * and then freed it with plain `delete` on a `new[]` allocation (undefined
 * behavior). The dead allocation is removed entirely.
 */
void ParallelCore::sendMyBestResultTo0() {
	if (logSendMyMax) cout << my_rank << " sending my max" << endl;
	int numOfNodes = inputMatrix->getNumberOfNodes();
	MPI_Send(maxresult, numOfNodes, MPI_INT16_T, 0, MSG_FINAL_RESULT, MPI_COMM_WORLD );
	if (logSendMyMax) cout << my_rank << "  sending my max    --end" << endl;
}

/**
 * Returns true once either the local or the global best clique depth has
 * passed half the node count — a clique larger than n/2 cannot be beaten
 * by the remaining search space, so searching further is pointless.
 */
short ParallelCore::isNotReasonToContinue() {
	const int halfNodes = inputMatrix->getNumberOfNodes() / 2;
	return (maxdepth > halfNodes) || (maxResultDepthGlobal > halfNodes);
}

// Performs one step of the depth-first clique search on the current task.
// The order of the nextByWidth / decreaseDepth calls encodes the
// backtracking protocol of Task and must not be reordered.
void ParallelCore::doOneSolvingStep() {
	// If the last expansion produced a node, the branch is promising, and the
	// extended candidate set is still a clique, go one level deeper.
	if (task->getLastExpandMovementIndex() != EMPTY && isHerePointToTryThisBranch() && isPossibleClique(task->getActualState(), task->getDepth() + 1)) {
		task->nexByInreaseDepth(); // so far its clique, so increase depth
	} else {
		task->nextByWidth(); // its not clique, try next node at same level
		// EMPTY after nextByWidth means this level is exhausted: backtrack.
		if (task->getLastExpandMovementIndex() == EMPTY) {
			task->decreaseDepth(); // there are not nodes at same level
			// The completed branch may hold the best clique seen so far.
			checkAndSaveMaximum(task);
			// Stop the whole task early once the result cannot be beaten.
			if (isNotReasonToContinue()) {
				task->setTaskSolved();
			}
			task->nextByWidth(); // at actual level node was already solved, try next at the same level
		}
	}
}

bool ParallelCore::isHerePointToTryThisBranch() {
	int depth = task->getDepth();
	int lastIndex = task->getLastExpandMovementIndex();
	int nodeDegree = inputMatrix->getNodeDegree(lastIndex);
	if (nodeDegree <= maxdepth) {
		cout << "vetev byla oriznuta v hloubce " << depth << " kvuli " << lastIndex << " ktery ma " << nodeDegree << " jelikoz lokalni maximum je " << maxdepth << endl;
		return false;
	}
	if (nodeDegree <= maxResultDepthGlobal) {
		cout << "vetev byla oriznuta v hloubce " << depth << " kvuli " << lastIndex << " ktery ma " << nodeDegree << "jelikoz globalni maximum je " << maxResultDepthGlobal << endl;
		return false;
	}
	return true;
}

/**
 * Forwards the termination token (with the given dirty flag) to the next
 * process in the ring and clears our own token state.
 */
void ParallelCore::sendTokenToNext(int isDirty) {
	// Once the token leaves us, we neither hold it nor track its dirtiness.
	iHaveToken = false;
	isTokenDirty = false;
	const int nextInRing = (my_rank + 1) % process_count;
	if (logSendTokenStart) cout << my_rank << " start sending token to " << nextInRing << " with value " << isDirty << endl;
	MPI_Send(&isDirty, 1, MPI_INT, nextInRing, MSG_TOKEN, MPI_COMM_WORLD );
	if (logSendToken) cout << my_rank << " end sending token to " << nextInRing << " with value " << isDirty << endl;
}

/**
 * Called when this process has run out of work: forwards the termination
 * token if we hold it, then (unless a request is already outstanding)
 * sends a MSG_WORK_REQUEST to a randomly chosen other process.
 *
 * BUG FIX: the original called srand(time(NULL)) on EVERY invocation.
 * Since this method is called in a tight loop, time() returns the same
 * second repeatedly, resetting the generator and making rand() yield the
 * same destination over and over. The RNG is now seeded exactly once.
 */
void ParallelCore::workFinishAskForNew() {
	// Pass the token along first (Dijkstra-style termination detection):
	// rank 0 injects a clean token, others forward their accumulated state.
	if (iHaveToken) {
		if (my_rank == 0) {
			sendTokenToNext(false);
		} else {
			sendTokenToNext(isTokenDirty);
		}
	}
	// At most one outstanding work request at a time.
	if (waitForTaskResponse) {
		return;
	}
	waitForTaskResponse = true;

	// Seed once per process; repeated reseeding destroys randomness.
	static bool randomSeeded = false;
	if (!randomSeeded) {
		srand(time(NULL));
		randomSeeded = true;
	}

	// Pick any process other than ourselves as the donor.
	// NOTE(review): loops forever if process_count == 1 — presumably the
	// program is never run with a single process; confirm.
	int dest = 0;
	do {
		dest = rand() % process_count;
	} while (dest == my_rank);

	int i = 1;
	if (logSendTaskRequest) cout << my_rank << " start sending work request to " << dest << endl;
	MPI_Send(&i, 1, MPI_INT, dest, MSG_WORK_REQUEST, MPI_COMM_WORLD );
	if (logSendTaskRequest) cout << my_rank << " end sending work request to " << dest << endl;
}

// Receives the termination token from the ring. If rank 0 gets a clean
// token back, the whole computation is finished: it stops its own loop and
// orders every other process to finish via MSG_FINISH.
void ParallelCore::tokenRecieved() {
	int recievedToken;
	// NOTE(review): this "start" log reads status.MPI_SOURCE from the
	// preceding Iprobe/Recv, not from this receive — may show a stale source.
	if (logSendTokenRecieveStart) cout << my_rank << " start receiving token from " << status.MPI_SOURCE << endl;
	MPI_Recv(&recievedToken, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
	if (logSendTokenRecieve) cout << my_rank << " end receiving token from " << status.MPI_SOURCE << " with value " << recievedToken << endl;
	iHaveToken = true;
	// A dirty incoming token keeps our token state dirty (sticky OR).
	isTokenDirty = isTokenDirty | recievedToken;

	// Clean token back at rank 0 means no process did any work during the
	// token's full trip around the ring -> global termination.
	if (my_rank == 0 && isTokenDirty == false) {
		shouldContinueSearch = false;
		for (int destination = 1; destination < process_count; destination++) {
			if (logSendFinish) cout << my_rank << " start sending finish to " << destination << endl;
			MPI_Send(&isTokenDirty, 1, MPI_INT, destination, MSG_FINISH, MPI_COMM_WORLD );
			if (logSendFinish) cout << my_rank << " end sending finish to " << destination << endl;
		}
	}
}

/**
 * Entry point of the parallel search. Sets up MPI rank/size, seeds the
 * global depth bound from the ratio r_p, creates this process's task
 * (rank 0 starts with the full search space and holds the token), runs
 * the search, and returns the best clique found (owned by this object).
 */
int16_t* ParallelCore::doParallel(double r_p) {
	const int nodeCount = inputMatrix->getNumberOfNodes();
	maxResultDepthGlobal = r_p * nodeCount; // initial pruning bound
	MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
	MPI_Comm_size(MPI_COMM_WORLD, &process_count);
	task = new Task(nodeCount);
	if (my_rank == 0) {
		// Rank 0 owns the whole problem initially, plus the termination token.
		task->createFullTask();
		iHaveToken = true;
	}
	findClique();
	return maxresult;
}

// Main worker loop: alternates between (1) periodically draining one pending
// MPI message (every CHECK_MSG_AMOUNT iterations) and (2) doing one search
// step, until termination is signaled. Rank 0 then merges all results.
void ParallelCore::findClique() {

	int counter = 1;
	int flag = 0;

	while (shouldContinueSearch) {
		counter++;
		// Poll for messages only every CHECK_MSG_AMOUNT iterations so the
		// search is not dominated by MPI probing.
		if ((counter % CHECK_MSG_AMOUNT) == 0) {
			MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &flag, &status);
			if (flag) {
				switch (status.MPI_TAG) {
				case MSG_WORK_REQUEST:
					// Another process begs for work: consume the request and
					// let the task split itself (marks the token dirty).
					if (logMainStates) cout << my_rank << " switch MSG_WORK_REQUEST" << endl;
					int tmp1;
					MPI_Recv(&tmp1, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
					task->splitAndSend(status.MPI_SOURCE, &isTokenDirty);
					break;
				case MSG_WORK_SENT:
					// A donated subtask arrives; we must have been idle.
					if (logMainStates) cout << my_rank << " switch MSG_WORK_SENT" << endl;
					if (task->haveWork()) {
						cout << my_rank << " throwing  exception: I have job but somebody send me new task" << endl;
						throw new exception();
					}
					task->receiveNewOne(&status);
					waitForTaskResponse = false;
					break;
				case MSG_WORK_NOWORK:
					// The donor had nothing to give: clear the pending flag
					// and immediately ask someone else.
					if (logMainStates) cout << my_rank << " switch MSG_WORK_NOWORK" << endl;
					int tmp2;
					if (logReceiveNoWork) cout << my_rank << " start receiving NOWORK from " << status.MPI_SOURCE << endl;
					MPI_Recv(&tmp2, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
					if (logReceiveNoWork) cout << my_rank << " end receiving NOWORK from " << status.MPI_SOURCE << endl;
					waitForTaskResponse = false;
					workFinishAskForNew();
					break;
				case MSG_TOKEN:
					// Termination token arrived (handled incl. its MPI_Recv).
					if (logMainStates) cout << my_rank << " switch MSG_TOKEN" << endl;
					tokenRecieved();
					break;
				case MSG_FINISH:
					// Rank 0 declared global termination: report our best
					// result and leave the loop.
					// NOTE(review): the MSG_FINISH payload is never MPI_Recv'd
					// here — presumably harmless since the process exits the
					// loop; confirm no later wildcard receive can match it.
					if (logMainStates) cout << my_rank << " switch MSG_FINISH" << endl;
					sendMyBestResultTo0();
					shouldContinueSearch = false;
					break;
				case MSG_MAX_DEPTH:
					// Another process found a deeper clique: tighten our
					// global pruning bound if it beats the current one.
					int tmp3;
					MPI_Recv(&tmp3, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
					if (tmp3 > maxResultDepthGlobal) {
						maxResultDepthGlobal = tmp3;
						if (logReceiveMax) cout << my_rank <<" received new global max depth " << maxResultDepthGlobal << endl;
					}
					break;
				case MSG_FINAL_RESULT:
					// Final results are only consumed in mergeMaxResults().
					cout << " !!!!!!!!!!!!!!! error message type MSG_FINAL_RESULT !!!!!!!!!!!!!!!!!!!" << endl;
					break;
				default:
					cout << " !!!!!!!!!!!!!!! error message type was not recognized !!!!!!!!!!!!!!!!!!!" << " tag: " << status.MPI_TAG << endl;
					break;
				}
			}
		}
		// One unit of actual work per loop iteration: either search, or beg
		// for a new task if ours is exhausted.
		if (shouldContinueSearch) {
			if (!task->haveWork()) {
				if (logMainStates) cout << my_rank << " no work for me" << endl;
				workFinishAskForNew();
			} else {
				if (logMainStates) cout << my_rank << " do one step" << endl;
				doOneSolvingStep();
			}
		}
	}

	//collect best result
	if (my_rank == 0) {
		mergeMaxResults(flag);
	}
}

/**
 * Returns 1 when the first sizeOfCandidate nodes of `candidate` form a
 * clique (every pair of distinct nodes connected), 0 otherwise.
 * Both orientations of each pair are checked, matching the original code
 * (no symmetry of the matrix is assumed here).
 */
short ParallelCore::isPossibleClique(int16_t* candidate, int sizeOfCandidate) {
	if (logBasicDebugCore) logPrintResult(candidate, inputMatrix->getNumberOfNodes());
	if (logBasicDebugCore) cout << "size of candidate to clique: " << sizeOfCandidate << endl;

	for (int first = 0; first < sizeOfCandidate; ++first) {
		for (int second = 0; second < sizeOfCandidate; ++second) {
			if (first == second) {
				continue; // a node is trivially "connected" to itself
			}
			if (!isConnected(candidate[first], candidate[second])) {
				return 0; // one missing edge disqualifies the whole set
			}
		}
	}
	return 1;
}

/**
 * Returns 1 when the adjacency matrix holds a 1 for (node1, node2),
 * otherwise logs the non-edge (when enabled) and returns 0.
 */
short ParallelCore::isConnected(int node1, int node2) {
	const bool adjacent = inputMatrix->getVlaue(node1, node2) == 1;
	if (!adjacent) {
		if (logIsNotConnected) logPrintNodesNotConnected(node1, node2);
		return 0;
	}
	return 1;
}

/**
 * Rank 0 only: collects the MSG_FINAL_RESULT arrays from every other
 * process and keeps the deepest clique in maxresult/maxdepth. A result's
 * size is the number of leading entries before the EMPTY (-1) sentinel.
 *
 * BUG FIXES:
 *  - MPI_Recv's count parameter is a number of ELEMENTS of the given
 *    datatype, not bytes; the original passed numNodes * sizeof(int16_t),
 *    overstating the receive buffer's capacity by 2x.
 *  - newResult was allocated with new[] but freed with plain delete (UB);
 *    now delete[].
 *  - The sentinel scan is bounded by numNodes so a result without a -1
 *    entry cannot read past the buffer.
 */
void ParallelCore::mergeMaxResults(int flag) {
	int resultsReceived = 0;
	int numNodes = inputMatrix->getNumberOfNodes();
	int depthOfNewResult;
	int16_t* newResult;
	// Debug: print our own result before merging.
	int size = 0;
	if (logMergePart) {
		cout << my_rank << " solution before merge with others max  ";
		for (int i = 0; i < inputMatrix->getNumberOfNodes(); ++i) {
			if (maxresult[i] > 0) {
				cout << maxresult[i] << " ";
				size++;
			}
		}
	}
	if (logMergePart) cout << " my max size at beginning is " << size << endl;
	// Busy-wait until every other process has delivered its final result.
	while (resultsReceived != process_count - 1) {
		MPI_Iprobe(MPI_ANY_SOURCE, MSG_FINAL_RESULT, MPI_COMM_WORLD, &flag, &status);
		if (flag) {
			resultsReceived++;
			if (logMergePart) cout << " catching MSG_FINAL_RESULT " << status.MPI_SOURCE << endl;
			newResult = new int16_t[numNodes];

			int sizeOfNewResult = 0;
			// Count is in elements (MPI_INT16_T), matching the allocation.
			MPI_Recv(newResult, numNodes, MPI_INT16_T, MPI_ANY_SOURCE, MSG_FINAL_RESULT, MPI_COMM_WORLD, &status);
			// Result length = entries before the -1 (EMPTY) sentinel.
			while (sizeOfNewResult < numNodes && newResult[sizeOfNewResult] != -1) {
				sizeOfNewResult++;
			}
			if (logMergePart) cout << "receive max result depth from " << status.MPI_SOURCE << " o with size " << sizeOfNewResult<< endl;
			depthOfNewResult = sizeOfNewResult - 1;

			if (depthOfNewResult > maxdepth) {
				if (logMergePart) cout << " I'll overwrite my max depth " << maxdepth << " with value " << depthOfNewResult << endl;
				for (int i = 0; i <= depthOfNewResult; i++) {
					maxresult[i] = newResult[i];
				}
				maxdepth = depthOfNewResult;
			} else {
				if (logMergePart) cout << " received max depth from " << status.MPI_SOURCE << " is not big as my maximum depth " << maxdepth << endl;
			}
			delete[] newResult;
		}
	}
}

/**
 * Debug dump for a missing edge (Czech output: "neni connect" = "not
 * connected", "v matici" = "in the matrix"). Prints the node pair plus
 * matrix context. Runtime strings intentionally left unchanged.
 */
void ParallelCore::logPrintNodesNotConnected(int node1, int node2) {
	cout << "neni connect " << node1 << "  " << node2 << endl;
	const int matrixValue = inputMatrix->getVlaue(node1, node2);
	cout << "nodes=" << inputMatrix->getNumberOfNodes() << "   index=" << matrixValue << "  node1=" << node1 << "  node2=" << node2 << "  v matici="
			<< inputMatrix->getVlaue(node1, node2) << endl;
}

