#include <ParallelJacobi.hpp>

#include <mpi.h>

#include <algorithm>
#include <cmath>
#include <vector>

// Construtor
/// Constructor.
///
/// Stores references to the system matrix A, right-hand side B and the
/// initial guess x, together with the convergence tolerance, the iteration
/// cap and this process's MPI identity. No computation happens here; all
/// work is deferred to solve().
///
/// @param error         relative convergence tolerance (maxDiff/maxX)
/// @param maxIterations upper bound on Jacobi iterations
/// @param pid           rank of this MPI process
/// @param nprocesses    total number of MPI processes
ParallelJacobi::ParallelJacobi(std::vector<std::vector<double> >& A,
                               std::vector<double>& B,
                               std::vector<double>& x,
                               double error,
                               int maxIterations,
                               int pid,
                               int nprocesses)
    : A(A),
      B(B),
      x(x),
      error(error),
      maxIterations(maxIterations),
      solved(false),
      pid(pid),
      nprocesses(nprocesses)
{
	// All members are set in the initializer list; nothing else to do.
}

/// Solves A x = B by the Jacobi iterative method, distributed over MPI
/// processes (cyclic row distribution: process `pid` owns rows
/// pid, pid+nprocesses, ...) with OpenMP parallelism inside each process.
///
/// Protocol per iteration: every worker sends its updated entries plus its
/// local maxX / maxDiff to the master (rank 0); the master merges them,
/// decides convergence (maxDiff / maxX <= error) and broadcasts the
/// decision; if another iteration is needed, the full updated vector is
/// broadcast back to all workers.
///
/// @param iterations  if non-null, receives the number of iterations run
/// @return the solution vector (also stored in member x; cached via `solved`)
std::vector<double>
ParallelJacobi::solve(int * iterations) {
	// Return the cached result if we already solved this system.
	if (solved) return x;

	// Double buffer: vx[curx] is the iterate being written,
	// vx[!curx] is the previous iterate being read.
	std::vector<double> vx[2];
	vx[0] = x;
	vx[1] = x;
	int curx = 0;
	int myIterations = 0;
	int n = A.size();
	MPI::Status status;

	int k = nprocesses, np = nprocesses;

	// Number of rows this process is responsible for.
	int res = 0;
	for (int i = pid; i < n; i += k) res++;

	// Exchange buffer: up to n values plus two extra slots for maxX and
	// maxDiff (sent at indices res and res+1). std::vector gives RAII
	// cleanup — the original raw new[] leaked when res == 0.
	std::vector<double> bufferStorage(n + 2);
	double * buffer = bufferStorage.data();
	int bsz;

	// Pre-scale only the rows owned by this process: divide each row by
	// its diagonal and zero the diagonal, so the Jacobi update below is
	// simply x_i = B[i] - sum_j A[i][j] * x_j.
	// Loop variables are declared in the loop headers so they are private
	// to each OpenMP thread (a function-scope `j` here would be shared —
	// a data race). The inner loop is left serial: a nested `parallel for`
	// either does nothing (nested parallelism off) or oversubscribes.
	#pragma omp parallel for
	for (int i = pid; i < n; i += k) {
		double diag = A[i][i];
		for (int j = 0; j < n; j++)
			A[i][j] /= diag;
		B[i] /= diag;
		A[i][i] = 0.0;
	}

	// Iterate until the iteration cap or until the requested relative
	// error is reached (decision made by the master, broadcast to all).
	for (int biggerThanError = 1;
		 myIterations < maxIterations && biggerThanError;
		 myIterations++) {

		// Jacobi update for this process's rows. `acc` keeps the partial
		// sum in a register instead of repeatedly indexing vx[curx][i].
		#pragma omp parallel for
		for (int i = pid; i < n; i += k) {
			double acc = B[i];
			for (int j = 0; j < n; j++)
				acc -= A[i][j] * vx[!curx][j];
			vx[curx][i] = acc;
		}

		double maxDiff = 0.0;
		// Floor maxX at `error` so the maxDiff/maxX test below can never
		// divide by zero.
		double maxX = error;
		int ii = 0;

		// Pack this process's values and collect the largest |x_i| and the
		// largest change for the convergence check.
		for (int i = pid; i < n; i += k) {
			buffer[ii++] = vx[curx][i];
			maxX = std::max(maxX, std::abs(vx[curx][i]));
			maxDiff = std::max(maxDiff, std::abs(vx[curx][i] - vx[!curx][i]));
		}
		buffer[res] = maxX;
		buffer[res + 1] = maxDiff;

		// Everyone has computed its share; exchange through the master.
		MPI::COMM_WORLD.Barrier();

		if (pid) { // Worker: send own values (plus maxX/maxDiff) to master.

			MPI::COMM_WORLD.Send(&res, 1, MPI_INT, 0, 1);
			MPI::COMM_WORLD.Send(buffer, res + 2, MPI_DOUBLE, 0, 1);

		} else { // Master: gather every worker's values.
			for (int i = 1; i < np; i++) {
				MPI::COMM_WORLD.Recv(&bsz, 1, MPI_INT, i, MPI_ANY_TAG);
				bsz += 2; // worker appends maxX and maxDiff
				// Receive at offset `st` (the original always wrote to the
				// start of the buffer) and count received DOUBLEs — the
				// original counted MPI_INTs, the wrong datatype.
				for (int st = 0; st < bsz; ) {
					MPI::COMM_WORLD.Recv(buffer + st, bsz - st, MPI_DOUBLE,
										 i, MPI_ANY_TAG, status);
					st += status.Get_count(MPI_DOUBLE);
				}
				// Unpack: worker i owns rows i, i+k, ... ; the last two
				// buffer slots are its maxX and maxDiff.
				ii = 0;
				for (int j = i; j < n; j += k)
					vx[curx][j] = buffer[ii++];
				maxX = std::max(maxX, buffer[ii++]);
				maxDiff = std::max(maxDiff, buffer[ii]);
			}
		}

		// Everyone waits for the master's continue/stop decision.
		MPI::COMM_WORLD.Barrier();

		if (!pid) { // Master decides convergence.
			if (maxDiff / maxX <= error) biggerThanError = 0;
		}
		// Broadcast the decision to all processes.
		MPI::COMM_WORLD.Bcast(&biggerThanError, 1, MPI_INT, 0);

		if (biggerThanError) { // Another iteration is needed: workers need
							   // the full updated vector.
			MPI::COMM_WORLD.Barrier();

			if (!pid && np > 1) { // Master stages the full vector.
				#pragma omp parallel for
				for (int i = 0; i < n; i++)
					buffer[i] = vx[curx][i];
			}
			// Broadcast the merged iterate.
			MPI::COMM_WORLD.Bcast(buffer, n, MPI_DOUBLE, 0);
			if (pid) { // Workers copy it into their current iterate.
				#pragma omp parallel for
				for (int i = 0; i < n; i++)
					vx[curx][i] = buffer[i];
			}
		}
		curx = !curx;
	}

	if (iterations) *iterations = myIterations;

	// Last completed iterate is vx[!curx] (curx was flipped at loop exit).
	x = vx[!curx];
	solved = true;
	return x;
}
