/*
 * LoadBalancing.cpp
 *
 *  Created on: 31/01/2014
 *      Author: root
 */

#include "LoadBalancing.h"
#include "Utils.h"
//TODO: move these constants out to a configuration file
#define MAX_UNBALANCE_ALLOWED 0.8
#define BALANCING_DELAY 1

extern int size, rank;
extern SortedBuffer * tuples;
extern pthread_mutex_t tuples_mutex;
extern pthread_cond_t idle_cond;
extern pthread_mutex_t end_flag_mutex;
extern bool end;
extern Tuple * bestSolution;
extern pthread_mutex_t best_solution_mutex;

/**
 * Exchanges the locally best known solution with all MPI processes and
 * adopts the globally cheapest one.
 *
 * Collective operation: every rank MUST call this in the same iteration
 * (it issues an MPI_Allgather on MPI_COMM_WORLD).
 *
 * Thread-safety: reads and replaces the shared `bestSolution` under
 * best_solution_mutex; the collective itself runs outside the lock so the
 * worker thread is not blocked while MPI communicates.
 */
void bestSolutionExchange() {

	// Snapshot the local best under the lock; the worker thread may keep
	// improving bestSolution while the (blocking) collective is in flight.
	pthread_mutex_lock(&best_solution_mutex);
	Tuple * ownBS = bestSolution->copy();
	pthread_mutex_unlock(&best_solution_mutex);

	int recvBuff[size][Tuple::tuplesIntSize];

	// Every rank contributes its serialized best solution.
	MPI_Allgather(ownBS->getArray(), Tuple::tuplesIntSize, MPI_INT, recvBuff,
			Tuple::tuplesIntSize, MPI_INT, MPI_COMM_WORLD);

	// Find the cheapest solution among all processes.
	int currentBSCost = ownBS->cost();
	int currentBSOwner = 0;

	for (int i = 0; i < size; i++) {
		int cost = Tuple::GetCostFromTupleArray(recvBuff[i]);
		if (cost < currentBSCost) {
			currentBSCost = cost;
			currentBSOwner = i;
		}
	}

	if (currentBSCost < ownBS->cost()) {
		pthread_mutex_lock(&best_solution_mutex);
		// Re-check against the CURRENT shared best under the lock: the worker
		// thread may have found an even cheaper solution while the collective
		// was running, and the original code clobbered it unconditionally.
		if (currentBSCost < bestSolution->cost()) {
			delete bestSolution;
			bestSolution = new Tuple(Tuple::tuplesIntSize,
					recvBuff[currentBSOwner]);
		}
		pthread_mutex_unlock(&best_solution_mutex);
	}

	delete ownBS;

}

/**
 * Computes how far the current work distribution deviates from a perfectly
 * balanced one, normalized against the worst possible distribution.
 *
 * @param t  total number of pending items across all processes
 * @param n  number of processes (n > 0)
 * @param CS CurrentState: CS[i] = number of items held by process i
 *           (array of n ints, summing to t)
 * @return 0.0 when perfectly balanced, 1.0 at worst-case imbalance
 *         (everything on one process). Returns 0.0 in the degenerate case
 *         where best and worst distributions coincide (e.g. n == 1, t == 0).
 */
double ComputeUnbalanceRate(int t, int n, int * CS) {
	//BFS: Best Feasible Solution  — items spread as evenly as possible
	//WFS: Worst Feasible Solution — all t items on a single process

	int div = t / n;
	int mod = t % n;

	//BFS = {ai / ai = (div+1) if 0 <= i < mod, ai = div if mod <= i < n}
	//WFS = {ai / a0 = t, ai = 0 if 1 <= i < n}

	// squared distance(BFS, WFS) — plain multiplication instead of pow():
	// exact for integers and much cheaper.
	double square_dist_BFS_WFS;
	if (mod > 0) {
		square_dist_BFS_WFS = (double) (div + 1 - t) * (div + 1 - t)
				+ (double) (mod - 1) * (div + 1) * (div + 1)
				+ (double) (n - mod) * div * div;
	} else {
		square_dist_BFS_WFS = (double) (div - t) * (div - t)
				+ (double) (n - 1) * div * div;
	}

	// BFS == WFS (n == 1 or t == 0): every state is "balanced"; the original
	// divided by zero here and returned NaN/inf.
	if (square_dist_BFS_WFS == 0.0) {
		return 0.0;
	}

	// squared distance(BFS, CS) — accumulated in double; the original used an
	// int accumulator, truncating each pow() result and risking overflow.
	double square_dist_BFS_CS = 0.0;

	for (int i = 0; i < mod; i++) {
		double d = div + 1 - CS[i];
		square_dist_BFS_CS += d * d;
	}

	for (int i = mod; i < n; i++) {
		double d = div - CS[i];
		square_dist_BFS_CS += d * d;
	}

	return sqrt(square_dist_BFS_CS / square_dist_BFS_WFS);
}

/**
 * Main loop of the load-balancing thread.
 *
 * Each round it waits up to BALANCING_DELAY seconds (or until the worker
 * thread signals idle_cond), gathers every process' pending-tuple count,
 * exchanges the best known solution, and — when the imbalance exceeds
 * MAX_UNBALANCE_ALLOWED — redistributes tuples so every process ends up
 * with roughly numPrefs / size of them. Terminates, setting the global
 * `end` flag, once no process has pending tuples.
 *
 * NOTE(review): every rank must reach each MPI collective in the same
 * iteration; the control flow leading to the collectives is identical on
 * all processes by construction.
 *
 * @return always 0.
 */
int LoadBalancing() {

	bool exitCondition;
	int numPrefs;
	double unbalanceRate;

	int currentDestribution[size];

	struct timespec ts;

	// Private communicator so these collectives cannot be matched against
	// unrelated traffic on MPI_COMM_WORLD.
	MPI_Comm balancingComm;
	MPI_Comm_dup(MPI_COMM_WORLD, &balancingComm);

	int iter = 0;
	do {

		iter++;

		// Absolute deadline for this round. Recomputed EVERY iteration — the
		// original set it once before the loop, so every wait after the first
		// had an already-expired deadline and the loop busy-spun. tv_nsec is
		// zeroed explicitly: it was previously left uninitialized, which is
		// undefined behavior for pthread_cond_timedwait.
		ts.tv_sec = time(NULL) + BALANCING_DELAY;
		ts.tv_nsec = 0;

		// Sleep until the worker thread signals or the delay elapses, then
		// sample the local pending-tuple count.
		pthread_mutex_lock(&tuples_mutex);
		pthread_cond_timedwait(&idle_cond, &tuples_mutex, &ts);
		numPrefs = tuples->size();
		debugPrinter(0, rank, "LoadBalancing", 128, "", "iter", iter,
				"tuples->size()", tuples->size());
		pthread_mutex_unlock(&tuples_mutex);

		// Gather every process' pending-tuple count.
		MPI_Allgather(&numPrefs, 1, MPI_INT, currentDestribution, 1, MPI_INT,
				balancingComm);

		bestSolutionExchange();

		// Total pending tuples across all processes.
		numPrefs = 0;

		for (int i = 0; i < size; i++) {
			numPrefs += currentDestribution[i];
		}

		exitCondition = numPrefs == 0;

		if (exitCondition) {
			// Nothing left anywhere: tell the worker thread to finish.
			pthread_mutex_lock(&end_flag_mutex);
			end = true;
			pthread_mutex_unlock(&end_flag_mutex);

		} else {

			// How skewed is the current distribution? (0 = perfect, 1 = worst)
			unbalanceRate = ComputeUnbalanceRate(numPrefs, size,
					currentDestribution);

			if (unbalanceRate > MAX_UNBALANCE_ALLOWED) {

				// Hold tuples_mutex for the whole redistribution so the worker
				// thread cannot consume or produce tuples mid-exchange.
				pthread_mutex_lock(&tuples_mutex);
				// Re-gather: local counts may have changed since the first
				// gather (the worker was running unlocked in between).
				numPrefs = tuples->size();
				MPI_Allgather(&numPrefs, 1, MPI_INT, currentDestribution, 1,
						MPI_INT, balancingComm);

				// Target share: div tuples each, +1 for the first mod ranks.
				int div = numPrefs / size;
				int mod = numPrefs % size;

				int totalIntsToDestribute = 0;
				int numSenders = 0;
				int offsetCounter = 0;
				int myOffset = 0;
				int inc;

				int recvcount[size];
				int displs[size];

				// Decide how many tuples each rank contributes (senders are
				// the ranks holding more than the target share) and where each
				// contribution lands inside the gathered buffer. Receivers
				// also compute the offset (in tuples) of their own slice.
				for (int i = 0; i < size; i++) {

					displs[i] = totalIntsToDestribute;

					if (currentDestribution[i] >= div + 1) {

						numSenders++;
						// The first `mod` senders keep div+1 tuples; the rest keep div.
						inc = numSenders > mod ?
								currentDestribution[i] - div :
								currentDestribution[i] - div - 1;

						recvcount[i] = inc * Tuple::tuplesIntSize;
						totalIntsToDestribute += inc * Tuple::tuplesIntSize;

					} else {

						if (i == rank) {
							myOffset = offsetCounter;
						}

						recvcount[i] = 0;
						offsetCounter += div - currentDestribution[i];

					}

				}

				int prefsToSend = recvcount[rank] / Tuple::tuplesIntSize;

				int (*sendBuff)[Tuple::tuplesIntSize];
				int (*recvBuff)[Tuple::tuplesIntSize];
				sendBuff =
						prefsToSend == 0 ?
								NULL :
								(int (*)[Tuple::tuplesIntSize]) malloc(
										sizeof(int) * recvcount[rank]);
				recvBuff = (int (*)[Tuple::tuplesIntSize]) malloc(
						sizeof(int) * totalIntsToDestribute);

				// Serialize the tuples this rank is giving away.
				if (prefsToSend != 0) {
					Tuple * tArray[prefsToSend];
					tuples->popKTuples(prefsToSend, tArray);

					for (int i = 0; i < prefsToSend; i++) {
						memcpy(sendBuff[i], tArray[i]->getArray(),
								sizeof(int) * Tuple::tuplesIntSize);
						//FIXME assumes the balancing always succeeds, since the
						//tuples are deleted here and exist nowhere else locally.
						delete tArray[i];

					}
				}

				// All excess tuples end up concatenated on every rank.
				MPI_Allgatherv(sendBuff, prefsToSend * Tuple::tuplesIntSize,
				MPI_INT, recvBuff, recvcount, displs, MPI_INT, balancingComm);

				// If this rank is a receiver, deserialize its slice of
				// recvBuff into new tuples and store them.
				int recvQty;
				if ((recvQty = div - currentDestribution[rank]) > 0) {

					Tuple * tArray[recvQty];

					for (int i = 0; i < recvQty; i++) {

						tArray[i] = new Tuple(Tuple::tuplesIntSize,
								recvBuff[myOffset + i]);

					}

					tuples->insert(tArray, recvQty);
				}

				// Wake the worker thread in case it is blocked waiting for
				// tuples to process.
				if (tuples->size() > 0) {
					pthread_cond_signal(&idle_cond);
				}

				pthread_mutex_unlock(&tuples_mutex);

				free(sendBuff);
				free(recvBuff);

			}

		}

	} while (!exitCondition);

	MPI_Comm_free(&balancingComm);

	return 0;
}

