#include <errno.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#include <mpi.h>

#define ROWBLOCKS 1
#define COLUMNBLOCKS 2


void matvecMPI(int N, int blockType);
void matvec(double *A, double *x, double *y, int N, int M);

//Entry point: initializes MPI, parses the matrix dimension from argv[1],
//and runs the matrix-vector benchmark with both block decompositions.
int main(int argc, char *argv[]) {
    int rc, myrank;

    rc = MPI_Init(&argc, &argv);
    if(rc != MPI_SUCCESS) {
        printf("MPI_Init error %d\n", rc);
        MPI_Abort(MPI_COMM_WORLD, rc);
    }

    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    if(argc != 2) {
        if(myrank == 0) fprintf(stderr, "Usage %s [matrix size]\n", argv[0]);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    //Parse the matrix dimension. strtol alone is not enough: reject empty or
    //non-numeric input (end == argv[1] or trailing garbage), overflow
    //(errno == ERANGE or value > INT_MAX), and non-positive sizes.
    errno = 0;
    char *end = NULL;
    long parsed = strtol(argv[1], &end, 10);
    if(end == argv[1] || *end != '\0' || errno == ERANGE || parsed < 1 || parsed > INT_MAX) {
        if(myrank == 0) fprintf(stderr, "Invalid matrix size '%s', should be a positive integer\n", argv[1]);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    int N = (int) parsed;       //Matrix dimension

    if(myrank == 0) printf("\nUsing column-wise block division:\n");
    matvecMPI(N, COLUMNBLOCKS);

    if(myrank == 0) printf("Using row-wise block division:\n");
    matvecMPI(N, ROWBLOCKS);

    MPI_Finalize();
    return 0;
}

//Computes the dense matrix-vector product y = A*x.
//A is stored row-major with N rows and M columns, so element (r,c) is
//A[r * M + c]; x has length M and y has length N.
void matvec(double *A, double *x, double *y, int N, int M) {
    for(int row = 0; row < N; row++) {
        double sum = 0.;
        double *Arow = A + (row * M);       //Start of the current matrix row
        for(int col = 0; col < M; col++) {
            sum += Arow[col] * x[col];
        }
        y[row] = sum;
    }
}

//Function that performs matrix vector multiplication using MPI.
//N is the global matrix dimension; blockType selects ROWBLOCKS (each task
//owns a band of rows) or COLUMNBLOCKS (each task owns a band of columns).
//The global data is A[i][j] = i*N + j and x[j] = j, so the result y = A*x
//can be checked against a closed-form answer at the end.
void matvecMPI(int N, int blockType) {         //N is the matrix size
    int i, j;
    int myrank, ntasks;

    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);

    if(N < 1) {
        if(myrank == 0) fprintf(stderr, "Matrix size incorrect, should be positive number\n");
        MPI_Abort(MPI_COMM_WORLD, N);
    }
    //Reject unknown decompositions up front; otherwise y and startTime would
    //be read uninitialized in the verification/timing code below (UB).
    if(blockType != ROWBLOCKS && blockType != COLUMNBLOCKS) {
        if(myrank == 0) fprintf(stderr, "Unknown block division type %d\n", blockType);
        MPI_Abort(MPI_COMM_WORLD, blockType);
    }

    int nElems;                                         //Number of rows/columns this task owns
    int *nElemsArray = malloc(ntasks * sizeof *nElemsArray);    //Per-task element counts
    int *displs = malloc(ntasks * sizeof *displs);              //Per-task offsets into the global vector
    if(nElemsArray == NULL || displs == NULL) {
        fprintf(stderr, "Out of memory on task %d\n", myrank);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    if(myrank == 0) {
        for(int rank = 0; rank < ntasks; rank++) {
            //Balanced partition: each task gets floor(N/ntasks) or
            //ceil(N/ntasks) elements, contiguous and in rank order.
            nElemsArray[rank] = ((rank+1) * N) / ntasks - (rank * N) / ntasks;
            displs[rank] = (rank * N) / ntasks;
        }
    }

    //Share nElemsArray and displs so every task can pass them to Allgatherv.
    MPI_Bcast(nElemsArray, ntasks, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(displs, ntasks, MPI_INT, 0, MPI_COMM_WORLD);
    nElems = nElemsArray[myrank];

    //Vectors x,y and the local matrix block. Not all of these are needed for
    //column-wise division, but ALocal is (and that is by far the largest).
    //Size arithmetic is done in size_t so nElems*N cannot overflow int.
    double *x = malloc(N * sizeof *x);
    double *y = malloc(N * sizeof *y);
    double *xLocal = malloc(nElems * sizeof *xLocal);
    double *yLocal = malloc(nElems * sizeof *yLocal);
    double *ALocal = malloc((size_t)nElems * N * sizeof *ALocal);
    //malloc(0) may legally return NULL, so only treat NULL as failure when
    //the requested size was nonzero (nElems can be 0 when ntasks > N).
    if(x == NULL || y == NULL ||
       (nElems > 0 && (xLocal == NULL || yLocal == NULL || ALocal == NULL))) {
        fprintf(stderr, "Out of memory on task %d\n", myrank);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    double startTime = 0., endTime;             //Wallclock times (rank 0 only)

    if(blockType == ROWBLOCKS) {         //Use row-wise block division of the matrix
        //Initialize both ALocal and xLocal; the double cast keeps the matrix
        //entry exact even when the global index would overflow int.
        for(i = 0; i < nElems; i++) {
            for(j = 0; j < N; j++) {
                ALocal[i * N + j] = (double)(displs[myrank] + i) * N + j;
            }
            xLocal[i] = displs[myrank] + i;
        }

        //Now perform matrix vector multiplication in parallel. Initially,
        //the vector x is scattered over the tasks so it needs to be gathered first.
        MPI_Barrier(MPI_COMM_WORLD);        //Make sure all tasks start the computation at the same time
        if(myrank == 0) startTime = MPI_Wtime();
        MPI_Allgatherv(xLocal, nElems, MPI_DOUBLE, x, nElemsArray, displs, MPI_DOUBLE, MPI_COMM_WORLD);

        //Every task now does its part of the matrix vector multiplication
        matvec(ALocal, x, yLocal, nElems, N);

        //Gather the result, so that every task has the full solution y
        MPI_Allgatherv(yLocal, nElems, MPI_DOUBLE, y, nElemsArray, displs, MPI_DOUBLE, MPI_COMM_WORLD);
    }

    else {                               //Use column-wise block division of the matrix
        //Initialize ALocal (this task's band of columns, stored row-major
        //with row stride nElems) and xLocal (the matching slice of x).
        for(i = 0; i < N; i++) {
            for(j = 0; j < nElems; j++) {
                ALocal[i * nElems + j] = displs[myrank] + (double)i * N + j;
            }
        }
        for(i = 0; i < nElems; i++) {
            xLocal[i] = displs[myrank] + i;
        }
        //Perform the matrix vector multiplication in parallel.
        MPI_Barrier(MPI_COMM_WORLD);        //Make sure all tasks start the computation at the same time
        if(myrank == 0) startTime = MPI_Wtime();
        matvec(ALocal, xLocal, y, N, nElems);

        //Each task produced a full-length partial y; sum them for the final solution
        MPI_Allreduce(MPI_IN_PLACE, y, N, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    }

    if(myrank == 0) {
        endTime = MPI_Wtime();
        printf("\tTotal time = %.3e s\n", endTime - startTime);
    }

    //Compute the analytical answer for y[i] = sum_j (i*N+j)*j and compare:
    //sum j^2 = s(s+0.5)(s+1)/3 and sum j = s(s+1)/2 with s = N-1.
    double s = N - 1, ans, maxdiff = 0;
    for(i = 0; i < N; i++) {
        ans = s * (s + 0.5) * (s + 1) / 3. + (double) N * i * s * (s+1) * 0.5;
        maxdiff = fmax(fabs(ans - y[i]), maxdiff);
    }
    MPI_Reduce(&maxdiff, &ans, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

    if(myrank == 0) printf("\tThe maximum error in y[i] is %e\n", ans);

    free(x);
    free(xLocal);
    free(y);
    free(yLocal);
    free(ALocal);
    free(nElemsArray);
    free(displs);
}
