/* mvs_mpi.c

   Simple matrix-vector multiplication

   Parallel version, using mpi

   Basile Clout, September 2007*/


#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>
#include <sys/time.h>
#include <getopt.h>
#include <unistd.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netdb.h>
#include <arpa/inet.h>

#define LEN 128
#define MASTER 0		/* Node acting like the effective master */
#define FIRSTMASTER 0		/* Node responsible to handle input */
#define MASTERSLAVES 1
#define SIZECHUNK 5
#define EQUAL 0
#define EQ_SR 0			/* MPI_Send/recv for sending with eq method */
#define EQ_SV 1			/* MPI_Scatterv for sending with eq method */
#define LOOP 0			/* Computation's empty loop */

#define PRINT 0
#define TIME 1
#define WARNING 0
#define SUMMARY 1
#define UID 1001		/* User for which we measure the performance */

/*Useful macros*/
/* WARNING: NO SPACE BETWEEN NAME AND PARAMETERS!!! */
#define BLOCK_LOW(r, p, n) ((r)*(n)/(p))
#define BLOCK_HIGH(r, p, n) (BLOCK_LOW(r+1, p, n) - 1)
#define BLOCK_SIZE(r, p, n) (BLOCK_HIGH(r, p, n) - BLOCK_LOW(r, p, n) + 1)

#define CLOCK(c) gettimeofday(&c,(struct timezone *)NULL)
#define CLOCK_DIFF(c1,c2) ((double)(c1.tv_sec-c2.tv_sec)+(double)(c1.tv_usec-c2.tv_usec)/1e+6)


/* Print the command-line usage/help text to stdout.
   Returns 0 on success (the function previously fell off the end of a
   non-void function, which is undefined behavior if the value is used). */
int usage(){
  puts("******************************************************************");
  puts("Usage: \n\tmpirun -np 4 --hostfile hosts `pwd`/mvs_mpi [options] matrix vector");
  puts("Example: \n\t mpirun -np 4 --hostfile lamhosts `pwd`/mvs_mpi --masterslaves --sizechunk 2 e500.mat e500.vec.");
  puts("Options:");
  puts("--eq EQUAL method: All nodes participate to the work at the same level. In effect, each node has to compute a roughly equivalent part of the result vector.(DEFAULT: EQUAL)");
  puts("--ms MASTERSLAVES method: A master node send and manage the work between all the other nodes, the workers. A fast worker (network & CPU) will do more work than a slower one.(DEFAULT: EQUAL)");
  puts("-h, --help Print this help");
  puts("-w, --warnings 1 or 0 Print warning (1) or not (0) (DEFAULT 0)");
  puts("-c, --chunksize arg (ms method) Determines size of the chunk for the master-slaves method (DEFAULT 5).");
  puts("-e, --eqsend 0 or 1 Use the scatterv/gatherv communication for the eq method (1) or classic MPI_Send/Recv (0). (DEFAULT = 1)");
  puts("-p, --print_result 0 or 1 Print (1) the result vector on stdout, or not (0) (DEFAULT 0).");
  puts("-t, --print_time 0 or 1 Print (1) the time on stdout, or not (0). (DEFAULT 1)");
  puts("-m, --master arg Specifies the master node (DEFAULT: 0)");
  puts("-l, --loop arg Specifies the size of the empty loop added to each node computation (DEFAULT: 0)");
  puts("mvs_mpi is the next generation of wrekagrid MPI benchmarking tool. It stands for \"matrix vector simple mpi\", because its implements a parallel row-stripped matrix-vector multiplication, roughly copied from \"PARALLEL PROGRAMMING in C with MPI and OpenMPI\" by Michael J. Quinn.\n");
  puts("Basile Clout, University of New Brunswick, September 2007.\n");
  puts("******************************************************************");
  return 0;
}


/* Allocate `size` bytes, aborting the whole MPI job on failure.
   `r` is the caller's rank, used only in the error message.
   Never returns NULL: on allocation failure the error is reported on
   stdout and MPI_Abort terminates every process. */
void * mymalloc(int r, int size){

  void *ptr = malloc((size_t)size);

  if(ptr == NULL){
    printf("ERROR: Impossible to malloc space for processor %d\n", r);
    fflush(stdout);
    MPI_Abort(MPI_COMM_WORLD, 1);
  }

  return ptr;
}


/* Entry point: parse options on the first master, broadcast the
   configuration, read matrix/vector files on the master, distribute the
   work with either the EQUAL or MASTERSLAVES strategy, then gather and
   print results plus timing statistics. */
int main(int argc, char *argv[]){

  int r, p;               /* this node's rank / total number of processes */
  int rm, frm;            /* effective master rank / first master (argv handler) */
  FILE *mS, *vS;          /* matrix and vector input streams */
  int nb;                 /* fscanf item-count accumulator */
  int m,n;                /* matrix dimensions: m rows, n columns */
  int mv, nv, lv;         /* vector file dimensions and derived length */
  int dummy, type, typev; /* header fields; type 0 = integer data (treated as double) */
  char matF[LEN];
  char vecF[LEN];
  int loop=LOOP;          /* size of artificial empty loop per row */

  double **matrix;	    /* complete matrix, only for the master */
  double **mymatrixchunk; /* chunk of the matrix, for the master AND the other nodes */
  double *vector;			/* Vector */
  double *result;			/* Result vector */
  double *myresultchunk;			/* Chunk of the result vector for everybody */
  
  int lr;			/* Local number of rows for each node */
  int nr, rsent, rrecv; /* number of rows to sent, Number of rows already sent, received */
  int i,j;
  int k,l;
  int wait=1;

  MPI_Status status;

  static int sizechunk = SIZECHUNK;
  static int print = PRINT;
  static int time = TIME;
  static int warning = WARNING;
  static int method = EQUAL;
  static int summary = SUMMARY;
  static int eq_send = EQ_SV;
  static int uid = UID;
  char *method_name;

  int *eqsv_count;		/* Arrays for eq method scatterv */
  int *eqsv_displ;
  
  double *buffer;		/* Buffer for one row */
  int nbblocks;			/* Number of blocks of size more or less chunks */
  int *arr_blocks;		/* Array giving the assignment (number of rows) for each block */
  int rcvsize, tmpsize;
  int *arr_nodes;		
  int pad;
  int terminated;
  int lrn;

  double tcomcomp1, tcomcomp2; /* communication + computation time viewed by the master */
  double tio1, tio2;	       /* IO time */
  double gt1, gt2;		       /* Global time (Using MPI_Wtime())*/
  double tcom0, tcom1, tcom2, tcom3, tcom4;   /* Communication times */
  double tcomp1, tcomp2;       /* Computation time (for each node) */
  double *times;		       /* Array containing computation times for each processor */
  int *nbr;			       /* Array containing number of rows computed for each node */
  int *nbpackets;		       /* Array containing the number of packet received */
  char *ips;
  char *hostnames;
  int mynbr;
  int mynbpackets;
  char myip[LEN];
  char myhostname[LEN];
  struct hostent *ht;
  struct in_addr *mystrangeip;
  double mytime;
  
  double comtime, com1time, com2time;
  double comptime, worktime, totaltime, iotime;
  
  int option_index, g;
  
  
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &r);
  MPI_Comm_size(MPI_COMM_WORLD, &p);

  /* Synchronize before starting the global and IO timers so every rank
     measures from the same point. */
  MPI_Barrier(MPI_COMM_WORLD);
  gt1 = MPI_Wtime();
  tio1 = gt1;
  
  
  rm = MASTER;
  frm = FIRSTMASTER;

  /* Only the first master parses argv; results are broadcast below. */
  if(r==frm){
    
    if(argc<3){
      printf("Usage: ./mvs_mpi [options] <matrix> <vector>\n");
      MPI_Abort(MPI_COMM_WORLD, 1);
    }

    if(p<2){
      printf("ERROR: Please use at least two processors!\n");
      MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Parse command line arguments */

    static struct option long_options[] = {
      {"eq", no_argument, &method, EQUAL},
      {"ms", no_argument, &method, MASTERSLAVES},
      {"help", no_argument, 0, 'h'},
      {"summary", required_argument, 0, 's'},
      {"warnings", required_argument, 0, 'w'},
      {"chunksize", required_argument, 0, 'c'},
      {"eqsend", required_argument, 0, 'e'},
      {"print_result", required_argument, 0, 'p'},
      {"print_time", required_argument, 0, 't'},
      {"master", required_argument, 0, 'm'},
      {"uid", required_argument, 0, 'u'},
      {"loop", required_argument, 0, 'l'}
    };

    while(1){
      g = getopt_long(argc, argv, "w:c:p:t:hm:s:e:u:l:", long_options, &option_index);

      if(g==-1)
	break;
      
      switch(g){
	
      case 0:
	/* A flag-style long option (--eq/--ms) stored its value directly;
	   anything else reaching here is a table misconfiguration. */
	if (long_options[option_index].flag != 0)
	  break;
	else{
	  printf("ERROR: Bad long option management!\n");
	  MPI_Abort(MPI_COMM_WORLD, 1);
	}

      case 'w':
	warning = (int)atoi(optarg);
	break;

      case 'c':
	sizechunk = (int)atoi(optarg);
	break;

      case 'p':
	print = (int)atoi(optarg);
	break;

      case 's':
	summary = (int) atoi(optarg);
	break;

      case 'l':
	loop = (int)atoi(optarg);
	break;

      case 'e':
	eq_send = (int) atoi(optarg);
	break;
	
      case 't':
	time = (int)atoi(optarg);
	break;

      case 'h':
	/* NOTE(review): prints help but continues running (and the other
	   ranks are already inside MPI collectives) — confirm intended. */
	usage();
	break;

      case 'u':
	uid = (int)atoi(optarg);
	break;

      case 'm':
	rm = (int)atoi(optarg);
	break;

      case '?':
	break;			/* Given a bad argument */
      }
    }

    /* Exactly two positional arguments expected: matrix file, vector file. */
    if( (argc-optind) == 2){
      strncpy(matF, argv[optind], sizeof(matF));
      optind++;
      strncpy(vecF, argv[optind], sizeof(vecF));
      //      printf("mat=%s, vec=%s\n", matF, vecF);
    }else{
      printf("Usage: ./mvs_mpi [options] <matrix> <vector>\n");
      MPI_Abort(MPI_COMM_WORLD, 1);
    }
  }

  /* Broadcast the parsed configuration from the first master to everyone. */
  MPI_Bcast(&rm, 1, MPI_INT, frm, MPI_COMM_WORLD);
  
  /* Broadcast the chosen method */
  MPI_Bcast(&method, 1, MPI_INT, frm, MPI_COMM_WORLD);
  MPI_Bcast(&eq_send, 1, MPI_INT, frm, MPI_COMM_WORLD); /* Broadcast which send method to use */
  MPI_Bcast(&matF, sizeof(matF), MPI_CHAR, frm, MPI_COMM_WORLD);
  MPI_Bcast(&vecF, sizeof(vecF), MPI_CHAR, frm, MPI_COMM_WORLD);
  MPI_Bcast(&sizechunk, 1, MPI_INT, frm, MPI_COMM_WORLD);
  MPI_Bcast(&uid, 1, MPI_INT, frm, MPI_COMM_WORLD);
  MPI_Bcast(&loop, 1, MPI_INT, frm, MPI_COMM_WORLD);

  if(r==rm){

    /* Master open matrix file, get the dimensions and send it to the other nodes*/
    if(!(mS = fopen(matF, "r"))){
      printf("ERROR: Impossible to open matrix file %s by master %d\n", argv[1], r);
      MPI_Abort(MPI_COMM_WORLD, 1);
    }
    /* Get the dimensions: rows, columns, a dummy field, and the value type. */
    nb = fscanf(mS, "%d", &m);
    nb += fscanf(mS,"%d", &n);
    nb += fscanf(mS, "%d", &dummy);
    nb += fscanf(mS, "%d", &type);
    if(nb!=4){
      printf("ERROR: Pb when reading matrix file by master %d\n", r);
      MPI_Abort(MPI_COMM_WORLD, 1);
    }
    if(type==0 && warning)
      printf("WARNING [matrix]: In this version, all integers are considered double and all operations involve double type.\n");
    if(method==MASTERSLAVES && sizechunk > m){
      printf("ERROR: Size of chunk too big: procs=%d, rows=%d, chunk=%d.\n", p, m, sizechunk);
      MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Master open vector file now! */
    if(!(vS = fopen(vecF, "r"))){
      printf("ERROR: Impossible to open vector file %s by master %d.\n", argv[2], r);
      MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* And parse it! */
    nb = fscanf(vS, "%d", &mv);
    nb += fscanf(vS,"%d", &nv);
    nb += fscanf(vS, "%d", &dummy);
    nb += fscanf(vS, "%d", &typev);
    if(nb!=4){
      printf("ERROR: Pb when reading vector file by master %d\n", r);
      MPI_Abort(MPI_COMM_WORLD, 1);
    }
    if(typev != type && warning)
      printf("WARNING: type of the values for the matrix and the vector are different by %d.\n", r);
    if(typev==0 && warning)
      printf("WARNING [vector] : In this version, all integers are considered double and all operations involve double type.\n");
    /* Accept either a row vector (1 x nv) or a column vector (mv x 1). */
    lv = (mv==1) ? nv : mv;
    if(lv!=n){
      printf("ERROR: The length of the vector must be the number of columns of the matrix by %d!.\n", r);
      MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Read files into memory (may be not really efficient, but useful for benchs testing (IO separated from communications overhead, opposed to the book point of view */

    /* Malloc matrix (in a dense way ;) — one contiguous m*n payload with a
       row-pointer table on top, so whole row ranges can be sent in one MPI call. */
    matrix = (double **) mymalloc(r, m*sizeof(double *));
    matrix[0] = (double *) mymalloc(r, n*m*sizeof(double));

    for(i=0;i<m;i++)
      matrix[i] = matrix[0] + i*n;

    /* ... And fill in it! */
    nb=0;
    for(i=0;i<m;i++){
      for(j=0;j<n;j++)
	nb += fscanf(mS, "%lf", &matrix[i][j]);
    }
    /* i==m and j==n after the loops, so i*j is the expected item count. */
    if(nb!=i*j){
      printf("ERROR: when reading the matrix into memory by %d\n", r);
      MPI_Abort(MPI_COMM_WORLD, 1);
    }

    
    /* Vector allocation and filling*/
    vector = (double *) mymalloc(r, n*sizeof(double));
    nb=0;
    for(i=0;i<n;i++)
      nb += fscanf(vS, "%lf", &vector[i]); 
    if(nb!=i){
      printf("ERROR: when reading the vector into memory by %d\n", r);
      MPI_Abort(MPI_COMM_WORLD, 1);
    }
  }

  /* For all nodes: resolve our own hostname and IP for the final report.
     NOTE(review): ht is dereferenced without checking gethostbyname/
     gethostbyaddr for NULL — would crash on resolution failure; confirm
     acceptable for the target cluster environment. */
  gethostname(myhostname, sizeof(myhostname));
  ht = gethostbyname(myhostname);
  mystrangeip = (struct in_addr*)ht->h_addr;
  ht = gethostbyaddr(mystrangeip, sizeof(*mystrangeip), AF_INET);
  strncpy(myhostname, ht->h_name, LEN);
  strncpy(myip, inet_ntoa(*mystrangeip), LEN);

  
  MPI_Barrier(MPI_COMM_WORLD);
  tio2 = MPI_Wtime();
  
  /* Start performance measurement */
  tcomcomp1 = MPI_Wtime();
  tcom0 = MPI_Wtime();
  
  /* Broadcast the dimensions values */
  MPI_Bcast(&m, 1, MPI_INT, rm, MPI_COMM_WORLD);
  MPI_Bcast(&n, 1, MPI_INT, rm, MPI_COMM_WORLD);

  /* Malloc space for the complete result vector.
     NOTE(review): result holds m entries (one per matrix row) but is
     allocated with n*sizeof(double) — overflows when m > n; confirm and
     change to m*sizeof(double). */
  if(r==rm)
    result = (double *) mymalloc(r, n*sizeof(double));

  /* Malloc space for the complete vector for the other nodes */
  /* And malloc place for matrix (else, error on linux?) */
  if(r!=rm){
    vector = (double *) mymalloc(r, n*sizeof(double));
    matrix = (double **) mymalloc(r, sizeof(double *)); /* Useless ... but necessary! */
  }

  /* Send vector to the other nodes.
     NOTE(review): vector has n elements but m are broadcast — only correct
     when m == n (square matrix); confirm and change count to n. */
  MPI_Bcast(vector, m, MPI_DOUBLE, rm, MPI_COMM_WORLD);

  /* Initialize some counters */
  mynbr = 0;
  mytime = 0.0;
  mynbpackets = 0;

  /* Two methods: EQUAL (all nodes receive a fair amount of data)
     and MASTERSLAVES
  */

  MPI_Barrier(MPI_COMM_WORLD);
  tcom1 = MPI_Wtime();

  if (method==EQUAL){

    /* Get the number of rows taken by this node */
    lr = BLOCK_SIZE(r, p, m);

    /* And malloc space for the matrix chunk to compute accordingly */
    mymatrixchunk = (double **) mymalloc(r, lr*sizeof(double*));
    mymatrixchunk[0] = (double *) mymalloc(r, lr*n*sizeof(double));
    for(i=0;i<lr;i++)
      mymatrixchunk[i] = mymatrixchunk[0] + n*i;

    /* Malloc chunks for the result vector */
    myresultchunk = (double *) mymalloc(r, lr*sizeof(double));

    /* Master send chunks of data, and other receive it*/
    /* With MPI_Send/Recv */
      
    if(eq_send==EQ_SR){

      //      printf("send/recv\n");
      if(r==rm){

	rsent = 0;    

	/* The master walks the ranks in order; it copies its own block
	   locally and sends every other rank its contiguous row range. */
	for(i=0;i<p;i++){
	  nr = BLOCK_SIZE(i, p, m);

	  if(i==rm){
	    for(k=0;k<lr;k++){
	      for(j=0;j<n;j++)
		mymatrixchunk[k][j] = matrix[rsent+k][j];
	    }
	    rsent += nr;
	  }else{
	    MPI_Send(matrix[rsent], nr*n, MPI_DOUBLE, i, 1, MPI_COMM_WORLD);
	    rsent += nr;
	  }
	}
      }
      else
	MPI_Recv(mymatrixchunk[0], lr*n, MPI_DOUBLE, rm, 1, MPI_COMM_WORLD, &status);
     
    }else{	/* With MPI_Scatterv */

      /* Build count and displ arrays (in units of doubles: rows * n). */
      eqsv_count = (int *) mymalloc(r, p*sizeof(int));
      eqsv_displ = (int *) mymalloc(r, p*sizeof(int));
      eqsv_count[0] = BLOCK_SIZE(0, p, m)*n;
      eqsv_displ[0] = 0;
      for(i=1;i<p;i++){
	eqsv_count[i] = BLOCK_SIZE(i, p, m)*n;
	eqsv_displ[i] = eqsv_displ[i-1] + eqsv_count[i-1];
      }

      //while (wait);
      
      //printf("im %d: lr*n=%d, rm=%d, p=%d\n", r,lr*n, rm, p);
      MPI_Scatterv(matrix[0], eqsv_count, eqsv_displ, MPI_DOUBLE, mymatrixchunk[0], lr*n, MPI_DOUBLE, rm, MPI_COMM_WORLD);
      
      //printf("Im %d: mymatrixchunk: %.2f %.2d %.2f %.2f %.2f\n", r, mymatrixchunk[0][0], mymatrixchunk[0][1], mymatrixchunk[0][2], mymatrixchunk[0][3], mymatrixchunk[0][4]);
      free(eqsv_displ);
      free(eqsv_count);
    }

    MPI_Barrier(MPI_COMM_WORLD);
    tcom2 = MPI_Wtime();

    tcomp1 = MPI_Wtime();
    /* Everybody performs its computation: dot product of each local row
       with the shared vector, plus an optional artificial delay loop. */
    for(i=0;i<lr;i++){
      myresultchunk[i] = 0.0;
      for(j=0;j<n;j++){
	myresultchunk[i] += mymatrixchunk[i][j]*vector[j];
      }
      for(j=0;j<loop;j++);	/* Empty loop */
    }
    tcomp2 = MPI_Wtime();

    mynbr += lr;
    mytime = tcomp2-tcomp1;
    
    /* And send the result back to the master
       Lets use a MPI_Gatherv for that! (Sooooooo funny ;),
       but quite necessary to guarantee a correct order...
       Well, of course, a lot of send/recv could certainly be working
       A MPI_Allgatherv would send the result everywhere,
       but we don't need it for now
       For the sake of comparison with master slave, use
       MPI_Send/recv!*/

    MPI_Barrier(MPI_COMM_WORLD);
    tcom3 = MPI_Wtime();
    //    printf("Im %d: comptime: %f, total comp time is %fs\n", r, mytime, tcom3-tcom2);
    
    /* Do a send/recv */
    if(eq_send == EQ_SR){

      if (r!=rm)
	MPI_Send(myresultchunk, lr, MPI_DOUBLE, rm, 2, MPI_COMM_WORLD);
      else{
	nr = 0;
	for (i=0;i<p;i++){
	  if(i==rm){
	    /* NOTE(review): myresultchunk only holds lr entries, yet k runs
	       to m — out-of-bounds read/write unless p == 1; confirm and
	       change the bound to lr. */
	    for(k=0;k<m;k++)
	      result[nr+k] = myresultchunk[k];
	    nr += lr;
	  }else{
	    rrecv = BLOCK_SIZE(i, p, m);
	    MPI_Recv(&result[nr], rrecv, MPI_DOUBLE, i, 2, MPI_COMM_WORLD, &status);
	    nr += rrecv;
	  }
	}
      }
    }
    else{	/* Do a scatterv/gatherv */

      /* Build count and displ arrays (in rows this time).
	 NOTE(review): allocated with sizeof(double) for int arrays —
	 harmless over-allocation, but sizeof(int) was presumably meant. */
      eqsv_count = (int *) mymalloc(r, p*sizeof(double));
      eqsv_displ = (int *) mymalloc(r, p*sizeof(double));
      eqsv_count[0] = BLOCK_SIZE(0, p, m);
      eqsv_displ[0] = 0;
      for(i=1;i<p;i++){
	eqsv_count[i] = BLOCK_SIZE(i, p, m);
	eqsv_displ[i] = eqsv_displ[i-1] + eqsv_count[i-1];
      }

      MPI_Gatherv(myresultchunk, lr, MPI_DOUBLE, result, eqsv_count, eqsv_displ, MPI_DOUBLE, rm, MPI_COMM_WORLD);

      free(eqsv_displ);
      free(eqsv_count);
      
    }
    
  

  MPI_Barrier(MPI_COMM_WORLD);
  tcom4 = MPI_Wtime();





  
  
  /* Master-slaves case
     One chunk represents SIZECHUNK (int sizechunk) rows*/
  }else if (method == MASTERSLAVES){

    MPI_Barrier(MPI_COMM_WORLD);
    
    /* First, master rule */
    if(r==rm){

      /* Build an array containing the assignment block->number of rows */
      nbblocks = m/sizechunk;
      if ( (m-nbblocks*sizechunk) != 0)
	nbblocks++;
      
      arr_blocks = (int *) mymalloc(r, nbblocks*sizeof(int));
      
      for(i=0;i<nbblocks;i++)
	arr_blocks[i] = BLOCK_SIZE(i, nbblocks, m);

      //printf("Block Array: ");
      //for(i=0;i<nbblocks;i++)
      //printf("%d ", arr_blocks[i]);
      //puts("\n\n");

      /* Array containing the assignment node->block */
      arr_nodes = (int *) mymalloc(r, p*sizeof(int));

      rsent = 0;		/* Number of rows already sent */
      rrecv = 0;		/* Number of rows already received */
      terminated = 0;

      /* Sizechunk is the maximum size of computed result's chunk */
      buffer = (double *) mymalloc(r, sizechunk*sizeof(double));

      /* Event loop: any incoming message (ack tag 3 or result tag 2) marks
	 the sender as idle; feed it the next block or a zero-length
	 terminator once all blocks are out. */
      do{

	/* Recv something */
	MPI_Recv(buffer, sizechunk, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
	MPI_Get_count(&status, MPI_DOUBLE, &rcvsize);
	
	if(status.MPI_TAG == 2){ /* Slave has sent result fragment */	  

	  /* Check size */
	  tmpsize = arr_blocks[arr_nodes[status.MPI_SOURCE]];
	  if (tmpsize != rcvsize){
	    printf("ERROR: Master %d received bad array from slave %d\n.", r, status.MPI_SOURCE);
	    MPI_Abort(MPI_COMM_WORLD, 1);
	  }
	  
	  /* Recompute the row offset of the block this slave was assigned. */
	  pad=0;
	  for(i=0;i<arr_nodes[status.MPI_SOURCE];i++)
	    pad += arr_blocks[i];
	  
	  for(i=0;i<rcvsize;i++)
	    result[pad+i] = buffer[i];

	  rrecv++;
	}
	
	/* Anyway, this node said it is alive, lets feed it! */
	if(rsent < nbblocks){

	  pad = 0;
	  for(i=0;i<rsent;i++){
	    pad += arr_blocks[i];
	  }

	  MPI_Send(matrix[pad], arr_blocks[rsent]*n, MPI_DOUBLE, status.MPI_SOURCE, 2, MPI_COMM_WORLD);
	  arr_nodes[status.MPI_SOURCE] = rsent;

	  rsent++;

	}else{
	  /* No work left: zero-length message tells the slave to stop. */
	  MPI_Send(NULL, 0, MPI_CHAR, status.MPI_SOURCE, 2, MPI_COMM_WORLD);
	  terminated++;
	}
	
      }while(terminated<p-1);

    }else{			/* And slaves part */

      /* Build maximum possible size arrays */
      mymatrixchunk = (double **) mymalloc(r, sizechunk*sizeof(double *));
      mymatrixchunk[0] = (double *) mymalloc(r, sizechunk*n*sizeof(double));
      for(i=0;i<sizechunk;i++)
	mymatrixchunk[i] = mymatrixchunk[0] + n*i;
      
      myresultchunk = (double *) mymalloc(r, sizechunk*sizeof(double));
	
      /* First, acknowledge it is alive (payload is ignored by the master;
	 only the arrival of a tag-3 message matters) */
      //printf("Im %d: Ready to send that im ready!\n", r); fflush(stdout);
      MPI_Send(vector, 1, MPI_DOUBLE, rm, 3, MPI_COMM_WORLD);
      //printf("Im %d: I just sent my acknowledgement to %d!\n", r, rm);

      
      while(1){

	/* Need to determine the size of the message */
	MPI_Recv(mymatrixchunk[0], sizechunk*n, MPI_DOUBLE, rm, 2, MPI_COMM_WORLD, &status);
	MPI_Get_count(&status, MPI_DOUBLE, &lrn);

	/* Zero-length message == termination signal from the master. */
	if(!lrn)
	  break;

	lr = lrn/n;
	mynbr += lr;
	mynbpackets ++;
	
	tcomp1 = MPI_Wtime();
	/* Actual computation */
	for(i=0;i<lr;i++){
	  myresultchunk[i] = 0.0;
	  for(j=0;j<n;j++){
	    myresultchunk[i] += mymatrixchunk[i][j]*vector[j];
	  }
	  for(j=0;j<loop;j++);	/* Empty loop */
	}
	tcomp2 = MPI_Wtime();

	mytime += tcomp2-tcomp1;
	
	/* Send the message back to the master */
	MPI_Send(myresultchunk, lr, MPI_DOUBLE, rm, 2, MPI_COMM_WORLD);
      }
    }
  }

  /* End global performance measurement */
  tcomcomp2 = MPI_Wtime();
  gt2 = MPI_Wtime();

  /* Gather each node's time and number of rows */
  times = (double *) mymalloc(r, p*sizeof(double));
  nbr = (int *) mymalloc(r, p*sizeof(int));
  nbpackets = (int *) mymalloc(r, p*sizeof(int));
  hostnames = (char *) mymalloc(r, p*LEN*sizeof(char));
  ips = (char *) mymalloc(r, p*LEN*sizeof(char));

  MPI_Gather(&mytime, 1, MPI_DOUBLE, times, 1, MPI_DOUBLE, rm, MPI_COMM_WORLD);
  MPI_Gather(&mynbr, 1, MPI_INT, nbr, 1, MPI_INT, rm, MPI_COMM_WORLD);
  MPI_Gather(&mynbpackets, 1, MPI_INT, nbpackets, 1, MPI_INT, rm, MPI_COMM_WORLD);
  MPI_Gather(myhostname, LEN, MPI_CHAR, hostnames, LEN, MPI_CHAR, rm, MPI_COMM_WORLD);
  MPI_Gather(myip, LEN, MPI_CHAR, ips, LEN, MPI_CHAR, rm, MPI_COMM_WORLD);

  /* Print result vector */
  if(r==rm){
    
    //    bzero(method_name, 64);
    //strncpy(method_name, (method==MASTERSLAVES) ? "ms":"eq", strlen("ms"));
    method_name = (method==MASTERSLAVES) ? "ms" : "eq";
    
    if(print){
      printf("Result (%s): ", method_name);
      for(i=0;i<m;i++)
	printf("%.2f ", result[i]);
      printf("\n");
    }

    if(summary){
      printf("Summary\n");
      printf("\tmatrix: %dx%d\n", m,n);
      printf("\trank = %d\n", r);
      printf("\tmaster = %d\n", rm);
      printf("\tfirst master = %d\n", frm);
      printf("\t#processors = %d\n", p);
      printf("\tmethod: %d (%s)\n", method, method_name);
      printf("\tsizechunk (for ms): %d\n", sizechunk);
      printf("\teq_send (for eq): %d (%s)\n", eq_send, (eq_send==EQ_SR) ? "MPI Send/Recv" : "MPI Scatterv/Gatherv");
      printf("\tprint result vector: %c\n", (print) ? 'y':'n');
      printf("\tdisplay time statistics: %c\n", (time) ? 'y':'n');
      printf("\tdisplay warnings: %c\n", (warning) ? 'y':'n');
      printf("\tdisplay summary: %c\n", (summary) ? 'y':'n');
      printf("\tTime = %fs (Communication + Computation)\n\n", tcomcomp2 - tcomcomp1); 
    }
    
    if(time){

      iotime = tio2-tio1;
      totaltime = gt2-gt1;
      worktime = tcomcomp2-tcomcomp1;
      printf("Total time (%s): %fs\n", method_name, totaltime);
      printf("\tIO time: %fs\n", iotime);
      printf("\tWork time: %fs\n", worktime);
      if(method==EQUAL){
	com1time = tcom2-tcom1;
	com2time = tcom4-tcom3;
	comptime = tcom3-tcom2;
	comtime = tcom4 - tcom0 - comptime;
	printf("\t\tCommunication time: %fs\n", comtime);
	printf("\t\t\tBcast, start overhead time: %fs\n", tcom1-tcom0);
	printf("\t\t\tSend time (%s): %fs\n", (eq_send == EQ_SR) ? "Send" : "Scatterv", com1time);
	printf("\t\t\tReceive time (%s): %fs\n", (eq_send == EQ_SR) ? "Recv" : "Gatherv", com2time);
	printf("\t\tComputation time: %fs\n", comptime);
	for(i=0;i<p;i++)
	  printf("\t\t\tNode %d (%s-%s): %fs (%d rows).\n", i, hostnames+i*LEN, ips+i*LEN, times[i], nbr[i]);
	printf("Statistics:\n\n");
	printf("\tratio IO/total: %.2f%%\n", iotime/totaltime*100.0);
	printf("\tratio work/total: %.2f%%\n\n", worktime/totaltime*100.0);
	printf("\tratio communication/work: %.2f%%\n", comtime/worktime*100.0);
	printf("\tratio computation/work: %.2f%%\n\n", comptime/worktime*100.0);
	printf("\tratio communication/computation: %.2f%%\n", comtime/comptime*100.0);
      }else{
	printf("\tComputation times:\n");
	for(i=0;i<p;i++)
	  printf("\t\tNode %d (%s-%s): %fs (%d packets, %d rows).\n", i, hostnames+i*LEN, ips+i*LEN, times[i], nbpackets[i], nbr[i]);
	printf("Statistics:\n");
	printf("\tratio IO/total: %.2f%%\n", iotime/totaltime*100.0);
	printf("\tratio work/total: %.2f%%\n", worktime/totaltime*100.0);
      }
      puts("\n");
    }
  }
  

  MPI_Finalize();
  exit(0);
}
