/* mv_mpi.c

   Parallel implementation of the matrix-vector multiplication.
   Limitation: all matrix and vector values are handled as doubles

   Usage:
   	mv_mpi <matrix> <vector>

   Basile Clout, September 2007

   */

#include <stdio.h>
#include <stdlib.h>
#include "mvutils_mpi.h"

#define MASTER 0


int main(int argc, char *argv[]){

  double **mat;			/* Full matrix (filled by mread) */
  double *vec;			/* Input vector (filled by vread) */
  double *res;			/* Local block of the final result */
  double *lmat;			/* Part (local) of matrix mat (filled by mread) */
  double *resf = NULL;		/* Final result; allocated on the master only.
				   NULL elsewhere: MPI_Gatherv ignores recvbuf
				   on non-root ranks, but passing an
				   uninitialized pointer would be UB. */

  double *pres_out;		/* Partial (semi-)sums produced locally */
  double *pres_in;		/* Partial sums received from the other nodes */

  int *atoa_cout;		/* MPI_Alltoallv count/displacement arrays */
  int *atoa_cin;
  int *atoa_dout;
  int *atoa_din;

  int *gather_count;		/* MPI_Gatherv count/displacement arrays */
  int *gather_displ;
  int lc, lr;			/* Local number of columns / rows */

  int i, j;
  int rm = MASTER;		/* Rank of the master process */
  int r, p;			/* This process' rank, number of processors */
  int m, n;			/* Number of rows / columns in the matrix */

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &r);
  MPI_Comm_size(MPI_COMM_WORLD, &p);

  if(r==rm && argc!=3){
    printf("Usage: %s <matrix> <vector>\n", argv[0]);fflush(stdout);
    MPI_Abort(MPI_COMM_WORLD, 1);
  }

  MPI_Barrier(MPI_COMM_WORLD);

  /* Scatters matrix and vector among the cluster */
  mread(argv[1], &mat, &lmat, &m, &n, MPI_COMM_WORLD, rm);
  vread(argv[2], &vec, &n, MPI_COMM_WORLD, rm);

  if(PRINT){
    mprint(mat, m, n, MPI_COMM_WORLD, rm);
    vprint(vec, n, MPI_COMM_WORLD, rm);
  }

  lc = BLOCK_SIZE(r, p, n);	/* Number of columns for each process */

  /* Each processor computes its partial contribution: pres_out[i] is the
     semi-sum of row i restricted to the locally-owned columns. */
  pres_out = malloc(m * sizeof *pres_out);
  /* Receive side of the alltoallv: p processors each send lc semi-sums */
  pres_in = malloc((size_t)p * lc * sizeof *pres_in);
  if(!pres_out || !pres_in){
    printf("Impossible to allocate memory for the alltoallv operation by %d!\n", r);
    MPI_Abort(MPI_COMM_WORLD, 1);
  }

  /* Real computation */
  for(i=0;i<m;i++){
    pres_out[i] = 0.0;
    for(j=0;j<lc;j++)
      pres_out[i] += mat[i][j]*vec[j];
  }

  /* Use Alltoallv to redistribute the partial sums by row blocks */
  atoa_cout = malloc(p * sizeof *atoa_cout);
  atoa_cin = malloc(p * sizeof *atoa_cin);
  atoa_dout = malloc(p * sizeof *atoa_dout);
  atoa_din = malloc(p * sizeof *atoa_din);
  if(!atoa_cout || !atoa_cin || !atoa_dout || !atoa_din){
    printf("Impossible to allocate the alltoallv count arrays by %d!\n", r);
    MPI_Abort(MPI_COMM_WORLD, 1);
  }

  /* Sending side: node i receives the semi-sums of its BLOCK_SIZE(i, p, m)
     rows, so the node sends a different amount of data to each peer. */
  atoa_cout[0] = BLOCK_SIZE(0, p, m);
  atoa_dout[0] = 0;
  for(i=1;i<p;i++){
    atoa_cout[i] = BLOCK_SIZE(i, p, m);
    atoa_dout[i] = atoa_dout[i-1] + atoa_cout[i-1];
  }

  /* Receiving side: the same amount of data from every other node */
  atoa_cin[0] = BLOCK_SIZE(r, p, m);
  atoa_din[0] = 0;
  for(i=1;i<p;i++){
    atoa_cin[i] = BLOCK_SIZE(r, p, m);
    atoa_din[i] = atoa_din[i-1] + atoa_cin[i-1];
  }

  /* Perform the AlltoAllv */
  MPI_Alltoallv(pres_out, atoa_cout, atoa_dout, MPI_DOUBLE,
		pres_in, atoa_cin, atoa_din, MPI_DOUBLE, MPI_COMM_WORLD);

  lr = BLOCK_SIZE(r, p, m);	/* Local number of rows */

  /* Perform the final sum of the partial values for the local rows */
  res = malloc(lr * sizeof *res);
  if(!res){
    printf("Impossible to allocate memory for the local result by %d!\n", r);
    MPI_Abort(MPI_COMM_WORLD, 1);
  }
  for(i=0;i<lr;i++){
    res[i] = 0.0;
    for(j=0;j<p;j++)
      res[i] += pres_in[j*lr + i];
  }

  if(r==rm){
    resf = malloc(m * sizeof *resf);
    if (!resf){
      printf("Error: Impossible to malloc space for final vector result by master %d!\n", r);
      MPI_Abort(MPI_COMM_WORLD, 1);
    }
  }

  /* Prepare arrays for the gatherv */
  gather_count = malloc(p * sizeof *gather_count);
  gather_displ = malloc(p * sizeof *gather_displ);
  if(!gather_count || !gather_displ){
    printf("Impossible to allocate space for the last gatherv structures by %d", r);
    MPI_Abort(MPI_COMM_WORLD, 1);
  }

  gather_count[0] = BLOCK_SIZE(0, p, m);
  gather_displ[0] = 0;
  for(i=1;i<p;i++){
    gather_count[i] = BLOCK_SIZE(i, p, m);
    gather_displ[i] = gather_displ[i-1] + gather_count[i-1];
  }

  /* And gatherv the result to the master node */
  MPI_Gatherv(res, lr, MPI_DOUBLE, resf, gather_count, gather_displ, MPI_DOUBLE, rm, MPI_COMM_WORLD);

  if(r==rm && PRINT){
    puts("Result:");
    /* Print result vector */
    for(i=0;i<m;i++)
      printf("%.2f ", resf[i]);
    putchar('\n');
  }

  /* Release local allocations.
     NOTE(review): mat, vec and lmat are allocated by the mvutils helpers;
     ownership is not visible here — confirm whether a matching free
     routine exists in mvutils_mpi.h before releasing them. */
  free(pres_out);
  free(pres_in);
  free(atoa_cout);
  free(atoa_cin);
  free(atoa_dout);
  free(atoa_din);
  free(gather_count);
  free(gather_displ);
  free(res);
  free(resf);

  MPI_Finalize();
  return 0;
}
