/*
 *    /||\
 *   / || \
 *  /__||__\
 *   __
 *  \  | UNIVERSIDADE DE PASSO FUNDO
 *   \ | Instituto de Ciências Exatas e Geociências
 *    \| Curso de Ciência da Computação
 * 
 * Trabalho de Conclusão II - 2008/2
 * 
 * Título do trabalho: Paralelização do Solver Intervalar LSS
 * Acadêmico: Alexandre Almeida
 * Orientador: Prof. Dr. Carlos Amaral Hölbig
 * 
 * Módulo: IminusABminusCB.h / IminusABminusCB.cpp
 * Descrição: Implementa a versão paralela do cálculo da operação 12 da Tabela xxx do TCC.
 * 
 */

#include "IminusABminusCB.h"

/**
 * IminusABminusCB - MPI-distributed computation of R = I - A*B - C*B.
 *                   Each process computes the rows of the result that correspond
 *                   to its slice of A/C, then the partial results are gathered
 *                   into `r` at the MASTER process.
 *
 * Input parameters:
 *          rmatrix& a: slice of the A matrix held by this process;
 *          rmatrix& c: slice of the C matrix held by this process;
 *          rmatrix& b: full B matrix (every process needs all columns of B);
 *            int rank: process rank (NOTE(review): not used in this body);
 *      int numThreads: number of pthreads to split the local computation into
 *                      (> 1 selects the threaded kernel);
 *   int worldCommSize: number of MPI processes sharing the row range of A.
 *
 * Output parameters:
 *          imatrix& r: gathered interval result matrix (meaningful at MASTER).
 *
 * Return value: void.
 *
 * NOTE(review): `slice` uses integer division; if the row count of `a` is not
 * divisible by worldCommSize, the remainder rows are not covered here — confirm
 * the caller guarantees divisibility.
 * NOTE(review): MPI_Gather here is a project-specific wrapper taking
 * (recv, send, root, comm) — verify against its declaration.
 */
void IminusABminusCB(rmatrix& a, rmatrix& c, rmatrix& b, imatrix& r, int rank, int numThreads, int worldCommSize){
	// Number of rows of A assigned to each MPI process (integer division).
	int slice = (Ub(a, ROW) - Lb(a, ROW) + 1) / worldCommSize;
	
	// Local partial result: `slice` rows, same column range as A.
	imatrix partialResult(1, slice, Lb(a, COL), Ub(a, COL));

	// Pick the threaded or sequential kernel for the local slice.
	if(numThreads > 1){
		partialIminusABminusCB(a, c, b, partialResult, numThreads);
	} else {
		partialIminusABminusCB(a, c, b, partialResult);
	}
	
	// Collect all partial results into `r` at the MASTER process.
	MPI_Gather(r, partialResult, MASTER, MPI_COMM_WORLD);
	
	return;
}

/**
 * IminusABminusCB - Slave-side variant of the distributed R = I - A*B - C*B
 *                   computation. The local result covers the full row range of
 *                   this process' slice of A, and is gathered at MASTER.
 *
 * Input parameters:
 *      rmatrix& a: slice of the A matrix held by this process;
 *      rmatrix& c: slice of the C matrix held by this process;
 * rmatrix& sliceB: B matrix (slice received from the master);
 *        int rank: process rank (NOTE(review): not used in this body);
 *  int numThreads: number of pthreads to split the local computation into
 *                  (> 1 selects the threaded kernel).
 *
 * Return value: void.
 *
 * NOTE(review): MPI_Gather is called with `partialResult` as both first and
 * second argument — presumably the project wrapper treats them as
 * (recv, send); confirm this in-place use is intended on slave ranks, where
 * the receive buffer is ignored.
 */
void IminusABminusCB(rmatrix& a, rmatrix& c, rmatrix& sliceB, int rank, int numThreads){	
	// Local partial result with the same index ranges as this process' slice of A.
	imatrix partialResult(Lb(a, ROW), Ub(a, ROW), Lb(a, COL), Ub(a, COL));
	
	// NOTE(review): these SetLb/SetUb calls re-apply the row bounds already
	// given to the constructor above — appears redundant; confirm.
	SetLb(partialResult, Lb(a, ROW), ROW);
	SetUb(partialResult, Ub(a, ROW), ROW);
		
	// Pick the threaded or sequential kernel for the local slice.
	if(numThreads > 1){
		partialIminusABminusCB(a, c, sliceB, partialResult, numThreads);
	} else {
		partialIminusABminusCB(a, c, sliceB, partialResult);
	}
	
	// Send the local partial result to MASTER (project MPI_Gather wrapper).
	MPI_Gather(partialResult, partialResult, MASTER, MPI_COMM_WORLD);
	
	return;
}

/**
 * partialIminusABminusCB - Sequential kernel computing R = I - A*B - C*B over
 *                          the index ranges of `r`, using a C-XSC dotprecision
 *                          accumulator so each entry is rounded only once.
 *
 * Input parameters:
 *    rmatrix& a: A matrix (rows indexed by the row range of r);
 *    rmatrix& c: C matrix (same row indexing as a);
 *    rmatrix& b: B matrix (columns indexed by the column range of r).
 *
 * Output parameters:
 *    imatrix& r: interval result; each entry encloses
 *                delta(i,j) - (A*B)[i][j] - (C*B)[i][j].
 *
 * Return value: void.
 */
void partialIminusABminusCB(rmatrix& a, rmatrix& c, rmatrix& b, imatrix& r){
	dotprecision dot(0);

	// Hoist the index bounds out of the loops.
	const int rowLo = Lb(r, ROW), rowHi = Ub(r, ROW);
	const int colLo = Lb(r, COL), colHi = Ub(r, COL);

	for(int row = rowLo; row <= rowHi; row++){
		for(int col = colLo; col <= colHi; col++){
			// Start from the identity contribution: 1 on the diagonal, 0 elsewhere.
			if(row == col){
				dot = 1.0;
			} else {
				dot = 0.0;
			}
			// Subtract the dot products of row `row` of A and C with column `col` of B,
			// accumulated exactly, then round once into the interval entry.
			accumulate(dot, -a[row], b[Col(col)]);
			accumulate(dot, -c[row], b[Col(col)]);
			rnd(dot, r[row][col]);
		}
	}
}

/**
 * partialIminusABminusCB - Multithreaded kernel computing R = I - A*B - C*B.
 *                          The row range of `r` is split into contiguous
 *                          chunks, one per POSIX thread; each thread runs
 *                          threadPartialIminusABminusCB on its chunk.
 *
 * Input parameters:
 *    rmatrix& a: A matrix (rows indexed by the row range of r);
 *    rmatrix& c: C matrix (same row indexing as a);
 *    rmatrix& b: B matrix (columns indexed by the column range of r);
 *        int nt: number of worker threads (must be >= 1).
 *
 * Output parameters:
 *    imatrix& r: interval result matrix, fully written on return.
 *
 * Return value: void.
 */
void partialIminusABminusCB(rmatrix& a, rmatrix& c, rmatrix& b, imatrix& r, int nt){
	int numThreads = nt, nRowsR = 0, threadSlice = 0;
 	
 	threadArgsIminusABminusCB args[numThreads];
 	
 	pthread_t threads[numThreads];
	pthread_attr_t attr;
	
	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
	
	nRowsR = Ub(r, ROW) - Lb(r, ROW) + 1;
	threadSlice = nRowsR / numThreads;
	
	for(int t = 0; t < numThreads; t++){
		args[t].threadId = t;
		args[t].a = &a;
		args[t].c = &c;
		args[t].b = &b;
		args[t].r = &r;
		
		args[t].startRowA = Lb(r, ROW) + (t * threadSlice);
		// BUGFIX: when nRowsR is not evenly divisible by numThreads, the
		// integer division above drops the remainder rows; previously no
		// thread ever computed them, leaving part of `r` unwritten. The last
		// thread now takes its slice plus the remainder.
		args[t].endRowA = (t == numThreads - 1)
			? Ub(r, ROW)
			: Lb(r, ROW) + ((t + 1) * threadSlice) - 1;
		
		pthread_create(&threads[t], &attr, threadPartialIminusABminusCB, &args[t]);
	}
 	
	// Joins the threaded execution
	for(int t = 0; t < numThreads; t++){
		pthread_join(threads[t], NULL);
	}
 	
	// Release the thread-attribute object (was previously leaked).
	pthread_attr_destroy(&attr);
	
	return;
}

/**
 * partialIminusABminusCB
 * 
 * Input parameters:
 *    rmatrix& a:
 *    rmatrix& c:
 *    rmatrix& b:
 * 
 * Output parameters:
 *    imatrix& r:
 * 
 * Return value: void.
 * 
 */
void *threadPartialIminusABminusCB(void *tArgs){
	threadArgsIminusABminusCB *args = (threadArgsIminusABminusCB *) tArgs;
	
	rmatrix &a = *(args->a);
	rmatrix &c = *(args->c);
	rmatrix &b = *(args->b);
	imatrix &r = *(args->r);
	
	dotprecision ac(0);
	
	for(int i = args->startRowA; i <= args->endRowA; i++){
		for(int j = Lb(r, COL); j <= Ub(r, COL); j++){
			ac = ( i == j) ? 1.0 : 0.0;
			accumulate(ac, -a[i], b[Col(j)]);
			accumulate(ac, -c[i], b[Col(j)]);
			rnd(ac, r[i][j]);
		}
	}
	
	pthread_exit(NULL);
}
