/*
 *    /||\
 *   / || \
 *  /__||__\
 *   __
 *  \  | UNIVERSIDADE DE PASSO FUNDO
 *   \ | Instituto de Ciências Exatas e Geociências
 *    \| Curso de Ciência da Computação
 * 
 * Trabalho de Conclusão II - 2008/2
 * 
 * Título do trabalho: Paralelização do Solver Intervalar LSS
 * Acadêmico: Alexandre Almeida
 * Orientador: Prof. Dr. Carlos Amaral Hölbig
 * 
 * Módulo: IminusAB.h / IminusAB.cpp
 * Descrição: Implementa a versão paralela do cálculo da operação 8 da Tabela xxx do TCC.
 * 
 */

#include "IminusAB.h"

/**
 * IminusAB - Master-side entry point, called by the MasterLss class. Distributes the rmatrix "a"
 *            across all processes in a stripped (row-block) partitioning via MPI_Scatter, and
 *            broadcasts the whole matrix "b" with MPI_Bcast. The master then computes its own
 *            stripe locally — multithreaded with POSIX threads when "numThreads" > 1, sequentially
 *            otherwise — and finally collects every process's partial results into the output
 *            matrices "c" and "ab" through MPI_Gather. All data exchange uses collective
 *            communication (MPI_Bcast, MPI_Scatter, MPI_Gather).
 *
 * Input parameters:
 *          rmatrix& a: matrix split among the processes by the MPI_Scatter operation;
 *          rmatrix& b: matrix broadcast to every process by the MPI_Bcast operation;
 *            int rank: rank of the master process;
 *      int numThreads: number of POSIX threads the local computation is divided into;
 *   int worldCommSize: number of MPI processes in the MPI_COMM_WORLD communicator.
 *
 * Output parameters:
 *          imatrix& c: result of (I - (a * b)), where I is the identity matrix;
 *         rmatrix& ab: result of (a * b), kept so the solver can reuse it in a later step.
 *
 * Return value: void.
 *
 */
void IminusAB(rmatrix& a, rmatrix& b, imatrix& c, rmatrix& ab, int rank, int numThreads, int worldCommSize){
	// Total number of rows of "a"
	int nRows = Ub(a, ROW) - Lb(a, ROW) + 1;
	// Rows per process under the stripped partitioning
	int sliceRows = nRows / worldCommSize;
	
	// The master's own stripe of "a"
	rmatrix localA;
	
	// Partial results computed locally by the master
	imatrix localC(1, sliceRows, Lb(a, COL), Ub(a, COL));
	rmatrix localAB(1, sliceRows, Lb(a, COL), Ub(a, COL));
	
	// Distribute "a" across all processes (the master keeps one stripe too)
	MPI_Scatter(a, localA, sliceRows, rank, MASTER, MPI_COMM_WORLD);
	// Ship the full "b" matrix to every process
	MPI_Bcast(b, rank, MASTER, MPI_COMM_WORLD);
	
	if(numThreads > 1){
		// Multithreaded local computation
		partialIminusAB(localA, b, localC, localAB, numThreads);
	} else {
		// Single-threaded local computation
		partialIminusAB(localA, b, localC, localAB);
	}
	
	// Collect every process's partial results into the full output matrices
	MPI_Gather(c, localC, MASTER, MPI_COMM_WORLD);
	MPI_Gather(ab, localAB, MASTER, MPI_COMM_WORLD);

	return;
}

/**
 * IminusAB - Slave-side entry point, called by the SlaveLss class. Receives this process's stripe
 *            of matrix "a" (via the collective MPI_Scatter initiated by the master) and the full
 *            matrix "b" (via MPI_Bcast), multiplies them — using POSIX threads when
 *            "numThreads" > 1 — and returns the partial results "c" and "ab" to the master
 *            through the collective MPI_Gather operations.
 *
 * Input parameters:
 *          rmatrix& a: matrix that will hold this process's stripe of "a";
 *          rmatrix& b: matrix that will hold the broadcast copy of "b";
 *            int rank: rank of this process;
 *      int numThreads: number of POSIX threads the computation is subdivided into.
 *
 * Output parameters: none (partial results are sent to the master via MPI_Gather).
 *
 * Return value: void.
 *
 */
void IminusAB(rmatrix& a, rmatrix& b, int rank, int numThreads){
	// Receive this process's stripe of "a", scattered by the master
	MPI_Scatter(a, a, 0, rank, MASTER, MPI_COMM_WORLD);
	// Receive the broadcast copy of "b"
	MPI_Bcast(b, rank, MASTER, MPI_COMM_WORLD);
	
	// Partial results produced by this slave
	imatrix localC(Lb(a, ROW), Ub(a, ROW), Lb(b, COL), Ub(b, COL));
	rmatrix localAB(Lb(a, ROW), Ub(a, ROW), Lb(b, COL), Ub(b, COL));
	
	if(numThreads > 1){
		// Multithreaded partial computation
		partialIminusAB(a, b, localC, localAB, numThreads);
	} else {
		// Single-threaded partial computation
		partialIminusAB(a, b, localC, localAB);
	}
	
	// Hand the partial results back to the master process
	MPI_Gather(localC, localC, MASTER, MPI_COMM_WORLD);
	MPI_Gather(localAB, localAB, MASTER, MPI_COMM_WORLD);
	
	return;
}

/**
 * partialIminusAB - Threaded variant, called by both IminusAB overloads. Computes the partial
 *                   results of the matrix-matrix product between "a" and "b", subdividing the
 *                   rows of "a" among "nt" joinable POSIX threads. The last thread absorbs any
 *                   remainder rows when the row count is not evenly divisible by "nt".
 *
 * Input parameters:
 *   rmatrix& a: slice of matrix a;
 *   rmatrix& b: full matrix b;
 *       int nt: number of threads.
 *
 * Output parameters:
 *   imatrix& c: result of I - a * b;
 *  rmatrix& ab: result of a * b.
 *
 * Return value: void.
 *
 */
void partialIminusAB(rmatrix& a, rmatrix& b, imatrix& c, rmatrix& ab, int nt){
	int numThreads = nt, nRowsA = 0, threadSlice = 0;
	// Arguments for each thread
	threadArgsIminusAB args[numThreads];

	pthread_t threads[numThreads];
	pthread_attr_t attr;

	// Set the threads as joinable
	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
	
	// Number of rows of the slice
	nRowsA = Ub(a, ROW) - Lb(a, ROW) + 1;
	// Base number of rows handled by each thread (integer division)
	threadSlice = nRowsA / numThreads;
	
	// Set the per-thread arguments and launch the threads
	for(int t = 0; t < numThreads; t++){
		args[t].threadId = t;
		args[t].sliceA = &a;
		args[t].b = &b;
		args[t].sliceC = &c;
		args[t].sliceAB = &ab;
		// startRowA and endRowA bound the rows of "a" this thread processes (inclusive)
		args[t].startRowA = Lb(a, ROW) + (t * threadSlice);
		// BUG FIX: when nRowsA is not a multiple of numThreads, the old computation
		// (Lb + (t+1)*threadSlice - 1 for every thread) left the last nRowsA % numThreads
		// rows unassigned, so they were never computed. The last thread now extends to
		// the final row of "a" to cover the remainder.
		if(t == numThreads - 1){
			args[t].endRowA = Ub(a, ROW);
		} else {
			args[t].endRowA = Lb(a, ROW) + ((t+1) * threadSlice) - 1;
		}
		
		// Trigger the thread
		pthread_create(&threads[t], &attr, threadPartialIminusAB, &args[t]);
	}
	
	pthread_attr_destroy(&attr);
	
	// Wait for every worker to finish before returning
	for(int t = 0; t < numThreads; t++){
		pthread_join(threads[t], NULL);
	}
	
	return;
}

/**
 * calcThreadParcialIminusAB - Function executed by each thread.
 * 
 * Input parameters:
 *  void *tArgs: a pointer to the structure threadArgsIminusAB.
 * 
 * Output parameters: none.
 *  
 * Return value: void *
 * 
 */
void *threadPartialIminusAB(void *tArgs){
	// Dereference the arguments
	threadArgsIminusAB *args = (threadArgsIminusAB *) tArgs;
	
	// The members sliceA, b, sliceC and sliceAB point to references of the C-XSC types
	rmatrix &a = *(args->sliceA);
	rmatrix &b = *(args->b);
	imatrix &c = *(args->sliceC);
	rmatrix &ab = *(args->sliceAB);
	
	dotprecision ac(0);
	
	// Performs the computation
	for(int i = args->startRowA; i <= args->endRowA; i++){
		for(int j = Lb(b, COL); j <= Ub(b, COL); j++){
			ac = 0;
			accumulate(ac, a[i], b[Col(j)]);
			rnd(ac, ab[i][j]);
			
			if (i == j){
				ac = 1.0 - ac;
			}
			//ac = (i == j) ? 1.0 - ac : ac;
			rnd(ac, c[i][j]);
		}
	}
	
	pthread_exit(NULL);
}

/**
 * partialIminusAB - Sequential variant, used when the execution is configured not to use
 *                   threads. Computes a * b (into "ab") and I - a * b (into "c") over the
 *                   whole slice, using a C-XSC dotprecision accumulator for exactly-rounded
 *                   dot products.
 *
 * Input parameters:
 *   rmatrix& a: slice of matrix a;
 *   rmatrix& b: full matrix b.
 *
 * Output parameters:
 *   imatrix& c: result of I - a * b;
 *  rmatrix& ab: result of a * b.
 *
 * Return value: void.
 *
 */
void partialIminusAB(rmatrix& a, rmatrix& b, imatrix& c, rmatrix& ab){
	// Exact dot-product accumulator
	dotprecision dot(0);
	
	for(int row = Lb(a, ROW); row <= Ub(a, ROW); row++){
		for(int col = Lb(b, COL); col <= Ub(b, COL); col++){
			dot = 0.0;
			// Exactly-accumulated dot product of row of "a" and column of "b"
			accumulate(dot, a[row], b[Col(col)]);
			// a * b, rounded into the real result matrix
			rnd(dot, ab[row][col]);
			
			// Diagonal entries get the identity subtraction: I - a * b
			if(row == col){
				dot = 1.0 - dot;
			}
			rnd(dot, c[row][col]);
		}
	}
	
	return;
}
