/*! \author Guillaume Collombet, Paul Talvat */

/*! \mainpage Fox & Cannon distributed matrix multiplication algorithm
 *
 * \section intro_sec Introduction
 *
 * This piece of software is an academic project.
 * It allows performing distributed matrix multiplication on a cluster using the MPI library.
 * A bunch of algorithms exist to perform that operation more or less efficiently.
 * This project implements the Fox and Cannon algorithms.
 *
 * You can checkout the project with your Subversion client through this command:
 * > svn checkout https://guillaumecollombet.googlecode.com/svn/trunk/mif32
 *
 * \section install_sec Installation
 *
 * This project is designed to run on a Linux environment, but it can easily be ported to other platforms
 * since all its dependencies are cross-platform.
 *
 * In order to build the project you need to have the cross-platform build tool *CMake* installed on your platform.
 * > [sudo apt-get install cmake](apt://cmake)
 *
 * If you want to generate the documentation you have to install *Doxygen*
 * > [sudo apt-get install doxygen](apt://doxygen)
 *
 * \section test_sec Tests
 *
 * A set of tests is processed at the end of the compilation.
 * These tests are written in independent C++ files located in ./test
 * The library <b>UnitTest++</b> has been used in order to perform unit testing.
 * This is a lightweight API locally included and linked (in ./include and ./lib), therefore no supplementary installation is required.
 *
 * To run the tests, locate the *test* executable generated in your build directory and run it :
 * > test
 * The tests results appear in your terminal out.
 *
 * \section use_Sec Use
 *
 * The program is made to be launched with the *mpirun* executable using the following syntax :
 * > mpirun -np \<number_of_proc\> FoxCannon \<filename_in_matrix_a\> \<filename_in_matrix_b\> \<filename_out_matrix\>
 * Where :
 * - \<number_of_proc\> is the number of processor
 * - \<filename_in_matrix_a\> is the first input matrix filename
 * - \<filename_in_matrix_b\> is the second input matrix filename
 * - \<filename_out_matrix\> is the output matrix filename
 *
 * The input files are simple text files that describe a matrix using the following structure :\n
 *
 * The first line contains the width and height separated by a space.\n
 * Then the following lines represent the rows of the matrix.\n
 * Within a row the values of each column are separated by a ';'.\n
 *
 * For example, for a 4x4 matrix full of zeros :\n
 *
 * 4 4\n
 * 0;0;0;0\n
 * 0;0;0;0\n
 * 0;0;0;0\n
 * 0;0;0;0\n
 *
 * <b>The program can automatically generate a bunch of input files</b>, by launching it directly without arguments :
 * > ./FoxCannon
 */

#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include <math.h>
#include <mpi.h>
#include <unistd.h>

#include "torus.h"
#include "matrix.h"
#include "timemeasurement.h"

//Element type of the matrices and the matching MPI datatype.
//NOTE: the two defines below must always be kept in sync.
#define MPI_MATRIXTYPE MPI_INT
#define mattype int

/*! \addtogroup Main
 * \brief This module contains all the functions needed to perform the distributed matrix multiplication
 *  @{
 */

/*! Print the expression and its evaluation with additional information on the current function, line and process */
#define PRINT_MPI(var) std::cout \
    << __PRETTY_FUNCTION__ << ", line " << __LINE__ << ": " \
    << "(" << processorRank << "/" << processorCount << ") " \
    << #var <<": " << var << std::endl

Matrix<mattype> *mat; /*!< \brief The result matrix C (allocated on rank 0 in merge()) */
Matrix<mattype> *a; /*!< \brief The A input matrix (loaded on rank 0 only) */
Matrix<mattype> *b; /*!< \brief The B input matrix (loaded on rank 0 only) */
Matrix<mattype> *submat; /*!< \brief The local result submatrix */
Matrix<mattype> *subA; /*!< \brief The local submatrix of A */
Matrix<mattype> *subB; /*!< \brief The local submatrix of B */

int processorCount; /*!< \brief The number of processors */
int processorRank; /*!< \brief The rank of the current processor */
int subMatSize; /*!< \brief The submatrix size (matSize / gridSize) */
int matSize; /*!< \brief The full matrix size */
int gridSize; /*!< \brief The processor grid square size */
MPI_Comm rowComm; /*!< \brief The row communicator through the torus */
MPI_Comm colComm; /*!< \brief The column communicator through the torus */
MPI_Comm gridComm; /*!< \brief The grid communicator through the torus */
Coord processorCoord; /*!< \brief The processor coordinate inside the torus */

/*! \brief initialize the square matrices of dimension `size` \f$ \times \f$ `size` and the torus communicators. */
bool initialize(const char* fileA, const char* fileB);
/*! \brief split matrix A and B in submatrices in processor 0 and send them to the other processors */
void split();
/*! \brief distributed matrix multiplication by the Cannon method */
void cannon();
/*! \brief distributed matrix multiplication by the Fox method */
void fox();
/*! \brief each processor sends its computed submatrix to processor 0 which merges them to form the result matrix C */
void merge();
/*! \brief readFile feeds a matrix from a file */
bool readFile(const char* filename, Matrix<mattype> **value);
/*! \brief writeFile write matrix to file */
/*! \brief writeFile writes the given matrix to the file `filename`.
 *  \return true when the file could be opened, false otherwise. */
template<typename T>
bool writeFile(const char *filename, const Matrix<T> *value)
{
    std::ofstream out(filename);
    if(!out.is_open())
        return false;
    out << *value;
    return true;
}
/*! \brief print to output and write result matrix to file */
void result(const char* fileResult);

/*! @} */

void generate() {
    if(processorRank == 0)
    {
        Matrix<mattype> t;
        for(unsigned int i = 2; i <= 1024; i *= 2) {
            for(int j = 1; j < 10; j++) {
                t = Matrix<mattype>(i,i,j);
                std::stringstream str;
                str << i << "x" << i << "_" << j;
                writeFile(str.str().c_str(),&t);
            }
        }
    }
}

void generate2() {
    if(processorRank == 0)
    {
        Matrix<mattype> t;
        t = Matrix<mattype>(192,192,2);
        std::stringstream str;
        str << 192 << "x" << 192 << "_" << 2;
        writeFile(str.str().c_str(),&t);
    }
}

/*! \brief Benchmark of the Fox algorithm on the fixed 192x192 input
 *  generated by generate2(): iteration i runs fox() (i+1) times and stores
 *  the measured time (rank 0 only) in row 0 of `perf`, which is finally
 *  written to a "perf_192x192_<grid>x<grid>" file.
 *  NOTE(review): assumes the file "192x192_2" exists — run generate2() first. */
void testPerformance2() {
    const int repetition = 10;
    timer chronos;
    //Timing results; only row 0 is actually filled below
    Matrix<double> perf(repetition,repetition,0.);
    std::stringstream str;
    str << "192x192_2" ;
    //The same file is used for both input matrices A and B
    initialize(str.str().c_str(),str.str().c_str());
    if(processorRank == 0) {
        PRINT_MPI(str.str());
    }
    split();
    for(int i = 0; i < repetition; i++) {
        //Only rank 0 measures time; the others just take part in the runs
        if(processorRank == 0)
            chronos.start();
        //Run the multiplication (i+1) times so the measured duration grows
        //linearly with i
        for(int k = 0; k < (i+1); k++)
            fox();
        if(processorRank == 0) {
            *perf.at(0,i) = chronos.stop();
            PRINT_MPI(*perf.at(0,i));
        }
    }
    merge();
    //The result matrix is saved next to the input with a "_r" suffix
    str << "_r";
    result(str.str().c_str());
    //Dump the timings in a file named after the input and the grid size
    std::stringstream filer;
    filer << "perf_192x192_" << gridSize << "x" << gridSize ;
    writeFile<double>(filer.str().c_str(),&perf);
}


/*! \brief Benchmark of the Fox algorithm on the generated "<size>x<size>_9"
 *  inputs, doubling the size `power` times starting from 16: for each size,
 *  iteration i runs fox() (i+1) times and stores the measured time (rank 0
 *  only) in perf(j,i), finally written to a file named "perf".
 *  NOTE(review): assumes the input files exist — run generate() first. */
void testPerformance() {
    const int power = 5;
    const int repetition = 1;
    //perf must hold `power` rows and `repetition` columns, hence the max
    int max = (power > repetition) ? power : repetition;
    timer chronos;
    Matrix<double> perf(max,max,0.);
    int size = 8;
    for(int j = 0; j < power; j++)
    {
        //Doubled first, so the actual sizes are 16, 32, ..., 8 * 2^power
        size *= 2;
        std::stringstream str;
        str << size << "x" << size << "_" << 9;
        //The same file is used for both input matrices A and B
        initialize(str.str().c_str(),str.str().c_str());
        if(processorRank == 0) {
            PRINT_MPI(str.str());
        }
        split();
        for(int i = 0; i < repetition; i++) {
            //Only rank 0 measures time
            if(processorRank == 0)
                chronos.start();
            //Run the multiplication (i+1) times so the measured duration
            //grows linearly with i
            for(int k = 0; k < (i+1); k++)
                fox();
            if(processorRank == 0) {
                *perf.at(j,i) = chronos.stop();
                PRINT_MPI(*perf.at(j,i));
            }
        }
        merge();
        //The result matrix is saved next to the input with a "_r" suffix
        str << "_r";
        result(str.str().c_str());
    }
    writeFile<double>("perf",&perf);
}

/*! \brief Entry point.
 *  With 3 file arguments: performs the distributed multiplication
 *  C = A * B and writes C to the third file.
 *  With no argument: generates a set of sample input matrices.
 *  \return 0 on success, -1 on bad arguments or unreadable input. */
int main(int argc, char** argv)
{
    if(argc >= 4) {
        MPI_Init(&argc,&argv);
        //NOTE(review): the unconditional benchmark runs (testPerformance()
        //and testPerformance2()) were removed from here: they required
        //pre-generated input files and executed a full benchmark campaign
        //before the multiplication the user actually asked for.
        int status = -1;
        if(initialize(argv[1],argv[2])) {
            split();
            fox();
//            cannon();
            merge();
            result(argv[3]);
            status = 0;
        }
        //Finalize exactly once, whatever the outcome
        MPI_Finalize();
        return status;
    }
    else if (argc == 1){
        //No argument: generate sample input files instead of multiplying
        generate();
        generate2();
        return 0;
    }
    else
    {
        std::cerr << "Incorrect number of arguments, the program must be called with the following syntax : " << std::endl <<
                     "mpirun -np <processor_count> foxcannon <matrix_A_filename> <matrix_B_filename> <result_matrix_filename>" << std::endl;
        return -1;
    }
}

/*! \brief Load the input matrices (rank 0 only), validate the problem
 *  geometry and build the torus communicators.
 *  \param fileA filename of the first input matrix
 *  \param fileB filename of the second input matrix
 *  \return true when everything is consistent, false otherwise (on every rank).
 */
bool initialize(const char *fileA, const char *fileB)
{
    MPI_Comm_size(MPI_COMM_WORLD, &processorCount);
    MPI_Comm_rank(MPI_COMM_WORLD, &processorRank);

    if(processorRank == 0) {
        //Load the matrices only in the first process in order to save memory.
        //On any failure matSize stays 0: it is broadcast below so that every
        //rank agrees on the outcome. (The previous early returns on rank 0
        //left the other ranks blocked forever in MPI_Bcast.)
        matSize = 0;
        if(!readFile(fileA, &a))
            std::cerr << "Unable to read file '" << fileA << "'." << std::endl;
        else if(!readFile(fileB, &b))
            std::cerr << "Unable to read file '" << fileB << "'." << std::endl;
        else if((a->width() != a->height()) || (a->width() <= 0))
            std::cerr << "Matrix A size should be square and greater than 0" << std::endl;
        else if((b->width() != b->height()) || (b->width() <= 0))
            std::cerr << "Matrix B size should be square and greater than 0" << std::endl;
        else if(a->width() != b->width())
            std::cerr << "Matrix A and B should have the same size" << std::endl;
        else
            matSize = a->width();
    }

    //Every rank learns the validated matrix size; 0 means rank 0 failed
    MPI_Bcast(&matSize,1,MPI_INT,0,MPI_COMM_WORLD);
    if(matSize <= 0)
        return false;

    int div = (int)floor(sqrt((double)processorCount));
    //The torus is a div x div grid: the processor count must be a perfect
    //square, otherwise the ranks outside the grid would get MPI_COMM_NULL
    //from MPI_Cart_create and fail in MPI_Comm_rank below.
    if(div * div != processorCount) {
        if(processorRank == 0)
            std::cerr << "Processor count should be a perfect square" << std::endl;
        return false;
    }
    if( !((matSize > 0) && (div <= matSize)) ) {
        if(processorRank == 0)
            std::cerr << "Square root of processor count should be inferior or equal to matrix size" << std::endl;
        return false;
    }
    //div must divide matSize exactly; the integer test replaces the fragile
    //floating-point equality of the original
    if(matSize % div != 0) {
        if(processorRank == 0)
            std::cerr << "Square root of processor count should divide the matrix size" << std::endl;
        return false;
    }

    //Create the 2D periodic (torus) communicator and the per-row /
    //per-column communicators used by the Fox and Cannon algorithms
    int dims[2] = {div,div};
    int periods[2] = {1,1};
    int coords[2];

    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &gridComm);
    MPI_Comm_rank(gridComm, &processorRank);
    MPI_Cart_coords(gridComm, processorRank, 2, coords);
    /* Create a communicator for each row and column */
    MPI_Comm_split(gridComm, coords[0], coords[1], &rowComm);
    MPI_Comm_rank(rowComm, &processorCoord.col);
    MPI_Comm_split(gridComm, coords[1], coords[0], &colComm);
    MPI_Comm_rank(colComm, &processorCoord.row);

    //Sets the current processor coordinate into the torus
    subMatSize = matSize / div;
    gridSize = div;
    return true;
}

void split()
{
    if(processorRank == 0) {
        //Create a datatype representing the submatrix
        MPI_Datatype submattype;
        MPI_Type_vector(subMatSize,subMatSize,matSize,MPI_MATRIXTYPE,&submattype);
        MPI_Type_commit(&submattype);
        //Create request descriptor array
        MPI_Request sRequests[processorCount * 2];
        //For each processor we send the a and b sub matrix to their corresponding processor
        int i;
        for(int y = 0; y < gridSize; y++) {
            for(int x = 0; x < gridSize; x++) {
                i = y * gridSize + x;
                MPI_Isend((void*) a->at(y * subMatSize, x * subMatSize), //submatrix address
                          1, //number count
                          submattype, //matrix datatype
                          i, //destination processor
                          i, //message tag
                          MPI_COMM_WORLD,
                          &sRequests[i]); //request address
                MPI_Isend((void*) b->at(y * subMatSize, x * subMatSize), //submatrix address
                          1, //submatrix count
                          submattype, //matrix datatype
                          i, //destination processor
                          processorCount + i, //message tag
                          MPI_COMM_WORLD,
                          &sRequests[processorCount + i]); //request address
            }
        }
        //Wait for the send requests to finish
        MPI_Waitall(processorCount * 2,sRequests,MPI_STATUSES_IGNORE);
        //Delete the matrix which are now distributed
        delete a;
        delete b;
    }
    MPI_Request rRequests[2];
    subA = new Matrix<mattype>(subMatSize,subMatSize,0);
    subB = new Matrix<mattype>(subMatSize,subMatSize,0);

    int i = processorCoord.row * gridSize + processorCoord.col;
    //For each processor exept the first we receive the sub matrix
    MPI_Irecv(subA->data(), //submatrix address
              subMatSize * subMatSize, //number of element to read in submatrix
              MPI_MATRIXTYPE, //integer datatype
              0, //source processor
              i, //message tag
              MPI_COMM_WORLD,
              &rRequests[0]); //request address
    MPI_Irecv(subB->data(), //submatrix address
              subMatSize * subMatSize, //number of element to read in submatrix
              MPI_MATRIXTYPE, //integer datatype
              0, //source processor
              processorCount + i, //message tag
              MPI_COMM_WORLD,
              &rRequests[1]); //request address
    MPI_Waitall(2,rRequests,MPI_STATUSES_IGNORE);
}

void merge()
{
    MPI_Send((void*) submat->data(), //submatrix address
             subMatSize * subMatSize, //number of element to read in submatrix
             MPI_MATRIXTYPE, //matrix datatype
             0, //destination processor
             processorRank, //message tag
             MPI_COMM_WORLD); //request address

    if(processorRank == 0) {
        //Create a datatype representing the submatrix
        MPI_Datatype submattype;
        MPI_Type_vector(subMatSize,subMatSize,matSize,MPI_MATRIXTYPE,&submattype);
        MPI_Type_commit(&submattype);
        //Create request descriptor array
        MPI_Request rRequests[processorCount];
        //Initialize the matrix
        mat = new Matrix<mattype> (matSize,matSize);
        int i;
        for(int y = 0; y < gridSize; y++) {
            for(int x = 0; x < gridSize; x++) {
                i = y * gridSize + x;
                MPI_Irecv((void*) mat->at(y * subMatSize,x * subMatSize),
                          1,
                          submattype,
                          i,
                          i,
                          MPI_COMM_WORLD,
                          &rRequests[i]);
            }
        }
        MPI_Waitall(processorCount,rRequests,MPI_STATUSES_IGNORE);
    }
    delete subA;
    delete subB;
}

void fox()
{
    // Allocate storage for temporary local matrix
    Matrix<mattype> *tmp = new Matrix<mattype>(subMatSize,subMatSize,0);
    submat = new Matrix<mattype>(subMatSize,subMatSize,0);

    // Destination for circular shift in columns
    int dest = (processorCoord.row -1 + gridSize) % gridSize;
    // Source for circular diffusion in columns
    int source = (processorCoord.row + 1) % (gridSize);
    for (int stage = 0; stage < gridSize; stage++) {
        int root = (processorCoord.row + stage) % gridSize;  //Process that does the broadcast
        if ( root == processorCoord.col ) {
            //Send to all other processes in the same row
            MPI_Bcast(subA->data(), subMatSize * subMatSize, MPI_MATRIXTYPE, root, rowComm);
            //Multiply the submatrices */
            Matrix<mattype>::multiplyAdd(*subA,*subB,*submat);
        }
        else {
            //Receive submatrix of A from the process that broadcasts
            MPI_Bcast(tmp->data(), subMatSize * subMatSize, MPI_MATRIXTYPE, root, rowComm);
            //Multiply it with own submatrix subB
            Matrix<mattype>::multiplyAdd(*tmp,*subB,*submat);
        }

        /* Send submatrix of B up and receive a new from below */
        MPI_Sendrecv_replace(subB->data(), subMatSize * subMatSize, MPI_MATRIXTYPE, dest, processorCoord.col, source, processorCoord.col, colComm, MPI_STATUSES_IGNORE);
    }

    delete tmp;
}

/*! \brief Read a matrix from a text file.
 *  \param filename file to read
 *  \param value receives a newly allocated matrix on success; left
 *         untouched on failure (the previous code allocated — and leaked —
 *         a matrix even when the file could not be opened)
 *  \return true on success, false when the file cannot be opened. */
bool readFile(const char *filename,  Matrix<mattype> **value)
{
    std::ifstream file(filename);
    if(!file.is_open())
        return false;
    //Allocate only once we know the file is readable
    *value = new Matrix<mattype>();
    file >> **value;
    return true;
}

/*! \brief Write the result matrix to `fileResult` (rank 0 only) and release
 *  it. Reports an error instead of failing silently when the file cannot
 *  be written. */
void result(const char* fileResult)
{
    if(processorRank == 0) {
        if(!writeFile<mattype>(fileResult,mat))
            std::cerr << "Unable to write file '" << fileResult << "'." << std::endl;
        delete mat;
        mat = NULL;
    }
}

void cannon() {
//2nd imp
    //Initialisation

    //Pre-skewing
    submat = new Matrix<mattype>(subMatSize,subMatSize,0);

    int sourcePreSkeA = processorCoord.col;
    int sourcePreSkeB = processorCoord.row;

    int mod;
    mod = (processorCoord.col - processorCoord.row) % gridSize;
    int destPreSkeA = ((mod >= 0) ? mod : gridSize + mod);
    mod = (processorCoord.row - processorCoord.col) % gridSize;
    int destPreSkeB = ((mod >= 0) ? mod : gridSize + mod);

    MPI_Sendrecv_replace(subA->data(),
                         subMatSize * subMatSize,
                         MPI_MATRIXTYPE,
                         destPreSkeA,
                         0,
                         sourcePreSkeA,
                         0,
                         rowComm,
                         MPI_STATUSES_IGNORE);
    MPI_Barrier(MPI_COMM_WORLD);

    MPI_Sendrecv_replace(subB->data(),
                         subMatSize * subMatSize,
                         MPI_MATRIXTYPE,
                         destPreSkeB,
                         0,
                         sourcePreSkeB,
                         0,
                         colComm,
                         MPI_STATUSES_IGNORE);
    MPI_Barrier(MPI_COMM_WORLD);
    //Algorithm core

    //Compute dest and source coordinates
    int sourceA = (processorCoord.col + 1) % (gridSize);
    int destA = (processorCoord.col -1 + gridSize) % gridSize;

    int sourceB = (processorCoord.row + 1) % (gridSize);
    int destB = (processorCoord.row -1 + gridSize) % gridSize;

    //Cannon algorithm
    for (int stage = 0; stage < gridSize; stage++) {

        Matrix<mattype>::multiplyAdd(*subA,*subB,*submat);

        // Send submatrix of B up and receive a new from below
        MPI_Sendrecv_replace(subB->data(),
                             subMatSize * subMatSize,
                             MPI_MATRIXTYPE,
                             destB,
                             0,
                             sourceB,
                             0,
                             colComm,
                             MPI_STATUSES_IGNORE);
        MPI_Barrier(MPI_COMM_WORLD);
        // Send submatrix of A left and receive a new from below
        MPI_Sendrecv_replace(subA->data(),
                             subMatSize * subMatSize,
                             MPI_MATRIXTYPE,
                             destA,
                             0,
                             sourceA,
                             0,
                             rowComm,
                             MPI_STATUSES_IGNORE);
        MPI_Barrier(MPI_COMM_WORLD);

    }

    //Post-skewing

    //Compute dest and source coordinates
    int sourcePostSkeA= processorCoord.col;
    int destPostSkeA = (processorCoord.col  + processorCoord.row) % gridSize;

    int sourcePostSkeB=processorCoord.row;
    int destPostSkeB =  (processorCoord.row  + processorCoord.col) % gridSize;

    MPI_Sendrecv_replace(subA->data(),
                         subMatSize * subMatSize,
                         MPI_MATRIXTYPE,
                         destPostSkeA,
                         0,
                         sourcePostSkeA,
                         0,
                         rowComm,
                         MPI_STATUSES_IGNORE);
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Sendrecv_replace(subB->data(),
                         subMatSize * subMatSize,
                         MPI_MATRIXTYPE,
                         destPostSkeB,
                         0,
                         sourcePostSkeB,
                         0,
                         colComm,
                         MPI_STATUSES_IGNORE);
    MPI_Barrier(MPI_COMM_WORLD);
}


