//  AUTHOR: Zachary D. Taylor, Stephen Durfey, Matthew Wehinger
//
//  DATE: 10/14/2010
//
//  PURPOSE: This program performs matrix multiplication in sequence
//  and in parallel using static scheduling over MPI ranks.  The execution
//  times are compared and the speedup is calculated.  Matrices are
//  implemented as dynamic, two-dimensional arrays.

#include <iostream>
#include <stdlib.h>
#include <time.h>
#include <mpi.h>
#include <math.h>

using std::cin;
using std::cout;
using std::endl;

//  Function prototypes
void initializeMatrix(double **, int, int);
void populateMatrix(double **, int, int);
void matrixMultiplyS(double **, double **, double **, int , int , int);
void deleteMatrix(double **, int);
double calculateSpeedup(double, double);
void print(double **, int, int);

// Begin main function
// Begin main function
//
//  Usage: <prog> m n k   — multiplies an m x n matrix by an n x k matrix.
//
//  Work distribution (static): each of the `threads` ranks computes
//  chunkSize = m / threads contiguous rows of C; rank 0 additionally
//  computes the m % threads leftover rows after the gather.
int main(int argc, char** argv)
{
	MPI_Init(&argc, &argv);

	// Number of ranks and this rank's id
	int threads = 0, id = 0;
	MPI_Comm_size(MPI_COMM_WORLD, &threads);
	MPI_Comm_rank(MPI_COMM_WORLD, &id);

	//  Validate arguments before dereferencing argv[1..3]
	if(argc < 4)
	{
		if(id == 0)
		{
			cout << "Usage: " << argv[0] << " <m> <n> <k>" << endl;
		}
		MPI_Finalize();
		return(1);
	}

	//  Matrix dimensions: A is m x n, B is n x k, C is m x k
	int m = atoi(argv[1]);
	int n = atoi(argv[2]);
	int k = atoi(argv[3]);

	//  Rows of C per rank, and rows left over when m % threads != 0
	int chunkSize = m / threads;
	int chunkRemainder = m % threads;

	//  Allocate space for arrays (initializeMatrix allocates the row
	//  arrays and zero-fills them; operator new throws on failure, so
	//  no null check is needed)
	double ** matrixA = new double*[m];
	double ** matrixB = new double*[n];
	double ** matrixC = new double*[m];
	double ** myWork  = new double*[chunkSize];

	initializeMatrix(matrixA, m, n);
	initializeMatrix(matrixB, n, k);
	initializeMatrix(matrixC, m, k);
	initializeMatrix(myWork, chunkSize, k);

	//  Gatherv layout: every rank contributes chunkSize*k doubles,
	//  placed back-to-back in rank order
	int * counts = new int[threads];
	int * offsets = new int[threads];
	for(int i = 0; i < threads; i++)
	{
		counts[i] = chunkSize * k;
		offsets[i] = i * chunkSize * k;
	}

	//  Flattened per-rank result and rank-0 receive buffer
	double * myWork2 = new double[chunkSize * k];
	double * tempC = new double[m * k];

	//  One derived type per matrix row so a row broadcasts as a unit
	MPI_Datatype typeA, typeB;
	MPI_Type_contiguous(n, MPI_DOUBLE, &typeA);
	MPI_Type_commit(&typeA);
	MPI_Type_contiguous(k, MPI_DOUBLE, &typeB);
	MPI_Type_commit(&typeB);

	//  Time related variables (only meaningful on rank 0)
	double startTime = 0.0, endTime = 0.0;
	double startTimeComm = 0.0, endTimeComm = 0.0;
	double executionTimePS = 0.0, commTime = 0.0;

	if(id == 0)
	{
		//  Populate matrices with random values; workers receive
		//  them through the broadcasts below
		populateMatrix(matrixA, m, n);
		populateMatrix(matrixB, n, k);

		startTime = MPI_Wtime();
		startTimeComm = MPI_Wtime();
	}

	//  Broadcasts are collective, so a single copy of this code serves
	//  both the root and the workers (the original duplicated it in an
	//  if/else with identical bodies)
	MPI_Bcast(&chunkSize, 1, MPI_INT, 0, MPI_COMM_WORLD);
	for(int x = 0; x < m; x++)
	{
		MPI_Bcast(&(matrixA[x][0]), 1, typeA, 0, MPI_COMM_WORLD);
	}
	for(int x = 0; x < n; x++)
	{
		MPI_Bcast(&(matrixB[x][0]), 1, typeB, 0, MPI_COMM_WORLD);
	}

	if(id == 0)
	{
		endTimeComm = MPI_Wtime();
		commTime = endTimeComm - startTimeComm;
	}

	//  Each rank multiplies its contiguous block of rows of A by B
	int start = id * chunkSize;
	int end = start + chunkSize;
	for(int p = start; p < end; p++)
	{
		for(int q = 0; q < n; q++)
		{
			for(int r = 0; r < k; r++)
			{
				myWork[p - start][r] += matrixA[p][q] * matrixB[q][r];
			}
		}
	}

	//  Flatten the local result for MPI_Gatherv
	for(int q = 0; q < chunkSize; q++)
	{
		for(int r = 0; r < k; r++)
		{
			myWork2[(q * k) + r] = myWork[q][r];
		}
	}

	MPI_Barrier(MPI_COMM_WORLD);

	if(id == 0)
	{
		startTimeComm = MPI_Wtime();
	}
	MPI_Gatherv(&(myWork2[0]), chunkSize * k, MPI_DOUBLE, &(tempC[0]), counts, offsets, MPI_DOUBLE, 0, MPI_COMM_WORLD);
	if(id == 0)
	{
		endTimeComm = MPI_Wtime();
		commTime += endTimeComm - startTimeComm;
	}

	MPI_Barrier(MPI_COMM_WORLD);

	if(id == 0)
	{
		//  Unpack the gathered rows into matrixC.
		//  BUG FIX 1: the row index is w / k (tempC is row-major with k
		//  columns); the original used w / m, which is wrong for m != k.
		//  BUG FIX 2: only threads*chunkSize*k elements were gathered;
		//  the original looped to m*k and copied uninitialized tempC
		//  entries over the remainder rows.
		int gathered = threads * chunkSize * k;
		for(int w = 0; w < gathered; w++)
		{
			matrixC[w / k][w % k] = tempC[w];
		}

		//  Rank 0 computes the leftover rows directly; these rows are
		//  still zero from initializeMatrix, so += accumulates correctly
		if(chunkRemainder > 0)
		{
			for(int p = m - chunkRemainder; p < m; p++)
			{
				for(int q = 0; q < n; q++)
				{
					for(int r = 0; r < k; r++)
					{
						matrixC[p][r] += matrixA[p][q] * matrixB[q][r];
					}
				}
			}
		}

		endTime = MPI_Wtime();
		executionTimePS = endTime - startTime;

		//  Print results
		cout << "Execution Time" << endl;
		cout << "--------------" << endl;
		cout << "Parallel (Static):                   " << executionTimePS << " seconds" << endl << endl;
		cout << "Communication Time" << endl;
		cout << "--------------" << endl;
		cout << "Parallel (Static):                   " << commTime << " seconds" << endl << endl;
	}

	//  Free memory on EVERY rank (the original freed only on rank 0,
	//  leaking all worker-side allocations) and release the MPI types
	deleteMatrix(matrixA, m);
	deleteMatrix(matrixB, n);
	deleteMatrix(matrixC, m);
	deleteMatrix(myWork, chunkSize);
	delete[] counts;
	delete[] offsets;
	delete[] myWork2;
	delete[] tempC;
	MPI_Type_free(&typeA);
	MPI_Type_free(&typeB);

	MPI_Barrier(MPI_COMM_WORLD);

	MPI_Finalize();
	return 0;
}

//  Initializes matrix of size numRows x numCols
//  Values set to zero
//  Initializes matrix of size numRows x numCols
//  Allocates each row and sets every entry to zero
void initializeMatrix(double **matrix, int numRows, int numCols)
{
	for(int row = 0; row < numRows; row++)
	{
		double *entries = new double[numCols];
		for(int col = 0; col < numCols; col++)
		{
			entries[col] = 0.0;
		}
		matrix[row] = entries;
	}
}

//  Populates matrix of size numRows x numCols
//  Values are floating point values between -10 and 10
//  Populates matrix of size numRows x numCols
//  Each entry is a pseudo-random floating point value in [-10, 10]
void populateMatrix(double **matrix, int numRows, int numCols)
{
	for(int row = 0; row < numRows; row++)
	{
		double *entries = matrix[row];
		for(int col = 0; col < numCols; col++)
		{
			entries[col] = -10.0 + rand() * 20.0 / RAND_MAX;
		}
	}
}

//  Multiplies two matrices of size m x n and n x k
//  Stores result in matrix of size m x k
//  Sequential
//  Multiplies two matrices of size m x n and n x k
//  Accumulates the result into matrixC (size m x k), which the caller
//  is expected to have zeroed beforehand
//  Sequential
void matrixMultiplyS(double **matrixA, double **matrixB, double **matrixC, int m, int n, int k)
{
	for(int row = 0; row < m; row++)
	{
		for(int inner = 0; inner < n; inner++)
		{
			//  A[row][inner] is invariant over the innermost loop
			const double a = matrixA[row][inner];
			for(int col = 0; col < k; col++)
			{
				matrixC[row][col] += a * matrixB[inner][col];
			}
		}
	}
}

//  Frees memory allocated to matrix with numRows rows
//  Frees memory allocated to matrix with numRows rows.
//  Safe to call with a NULL matrix (no-op).
//
//  Note: `matrix` is passed by value, so the caller's pointer is NOT
//  nulled here — the original's trailing `matrix = NULL;` only modified
//  the local copy and has been removed, along with the dead stores that
//  nulled each row slot immediately before freeing the row array.
void deleteMatrix(double **matrix, int numRows)
{
	if(matrix == NULL)
	{
		return;
	}

	for(int i = 0; i < numRows; i++)
	{
		delete[] matrix[i];
	}

	delete[] matrix;
}

//  Calculates speedup for the given execution times
//  Calculates speedup for the given execution times.
//  A zero second time (which would divide by zero) yields a speedup of 1.
double calculateSpeedup(double executionTime1, double executionTime2)
{
	return (executionTime2 == 0) ? 1 : executionTime1 / executionTime2;
}

//  Prints the contents of the given m x n matrix
//  Prints the contents of the given m x n matrix to standard output,
//  one row per line with entries separated by single spaces
void print(double **matrix, int m, int n)
{
	for(int row = 0; row < m; row++)
	{
		for(int col = 0; col < n; col++)
		{
			cout << matrix[row][col] << " ";
		}
		cout << endl;
	}
}
