#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/times.h>
#include <mpi.h>
#include "mytime.h"

#define N_DEF 200
#define TOL_DEF 0.0000001
#define MAXIT_DEF 500

/****************/
/* Block-row distribution shared with jacobi_mpi() and used directly as the
   displacement/count arrays of MPI_Allgatherv: despl[p] is the index of the
   first row owned by rank p, count[p] the number of rows it owns.
   Both arrays are allocated and filled in main(), one entry per process. */
int *despl;
int *count;
/****************/

/*
 * Jacobi iteration for the linear system a*x = b, parallelized with MPI.
 *
 * Each rank updates only its own block of rows (rows despl[miid] ..
 * despl[miid]+count[miid]-1, see the global distribution arrays) and the
 * full solution vector is rebuilt on every rank with MPI_Allgatherv.
 *
 *  miid   - this process' MPI rank
 *  a      - full n x n coefficient matrix (replicated on every rank)
 *  nfilas - rows per process (kept for interface compatibility; the row
 *           range actually comes from the global despl[]/count[] arrays)
 *  n      - system dimension
 *  b      - right-hand side vector (length n)
 *  x      - in/out solution vector (length n); initialized to b internally
 *  tol    - stop when the 1-norm residual |a*x - b| drops below this value
 *  maxit  - hard cap on the number of iterations
 *
 * Returns the global residual after the last iteration.
 * Collective: every rank in MPI_COMM_WORLD must call this together.
 */
double jacobi_mpi(int miid, double **a, int nfilas, int n, double *b, double *x, double tol, int maxit) {
  int i, j, it = 0;
  double sum, resi_local, resi, *new_x;
  int fin;

  resi = 10.0; /* sentinel larger than any sensible tol so the loop starts */
  new_x = (double *) malloc(n * sizeof *new_x);
  if (new_x == NULL) {
    /* Fix: the allocation was previously used unchecked */
    fprintf(stderr, "jacobi_mpi: out of memory (n = %d)\n", n);
    MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
  }

  /* Initial guess: x = b */
  for (i = 0; i < n; i++) x[i] = b[i];

  /* Fix: re-enable the convergence test that was commented out, so the
     tol parameter is honored and converged runs stop early.  resi holds
     the global residual of the previous iteration. */
  while ((it < maxit) && (resi > tol)) {
    fin = despl[miid] + count[miid];
    resi_local = 0.0;
    for (i = despl[miid]; i < fin; i++) {
      sum = 0.0;
      for (j = 0; j < n; j++)
        sum += a[i][j]*x[j];
      /* Residual contribution of row i: |(a*x)_i - b_i| */
      resi_local += fabs(sum - b[i]);
      /* Jacobi update: x_i = (b_i - sum_{j != i} a_ij * x_j) / a_ii */
      sum += -a[i][i]*x[i];
      new_x[i] = (b[i] - sum) / a[i][i];
    }

    /* Rebuild the full x on every rank from each rank's block of new_x,
       then combine the per-rank residuals into the global 1-norm. */
    MPI_Allgatherv(&new_x[despl[miid]], count[miid], MPI_DOUBLE, x, count, despl, MPI_DOUBLE, MPI_COMM_WORLD);
    MPI_Allreduce(&resi_local, &resi, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    it++;
  }

  free(new_x);

  return resi;
}



/*
 * Driver: builds a random diagonally-dominant system on rank 0, broadcasts
 * it, solves it with the MPI Jacobi kernel and prints per-rank timing.
 *
 * Usage: prog [n [maxit [tol]]]   (defaults: N_DEF, MAXIT_DEF, TOL_DEF)
 * Output per rank: "rank:time:residual"
 */
int main(int argc, char *argv[]) {
  double tol;
  double **a, *b, *x;
  int i, j, n, nfilas, diag, maxit;
  int nprocs, miid;

  CLOCK_TYPE chrono;
  double mpi_time;
  double mpi_resi;
  int namelen;
  char processor_name[MPI_MAX_PROCESSOR_NAME];

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &miid);
  MPI_Get_processor_name(processor_name, &namelen);

  n = N_DEF;
  maxit = MAXIT_DEF;
  tol = TOL_DEF;

  /* Optional positional arguments; cascading cases are intentional so
     trailing arguments override the defaults. */
  switch (argc) {
    case 4: tol = atof(argv[3]);   /* fallthrough */
    case 3: maxit = atoi(argv[2]); /* fallthrough */
    case 2: n = atoi(argv[1]);
  }

  /* Fix: the row distribution below requires n to be divisible by nprocs
     (the original comment merely assumed it).  Fail fast instead of
     silently leaving the last n % nprocs rows out of the computation. */
  if (nprocs <= 0 || n <= 0 || (n % nprocs) != 0) {
    if (miid == 0)
      fprintf(stderr, "Error: n (%d) must be a positive multiple of nprocs (%d)\n", n, nprocs);
    MPI_Finalize();
    return EXIT_FAILURE;
  }
  nfilas = n / nprocs;

  ReservaMatriz(&a, n, n);
  ReservaVector(&b, n);
  ReservaVector(&x, n);

  /* Right-hand side: generated on rank 0, replicated everywhere. */
  if (miid == 0) {
    GeneraVector(b, n);
  }
  MPI_Bcast(b, n, MPI_DOUBLE, 0, MPI_COMM_WORLD);

  /* Random matrix, made diagonally dominant on rank 0 so the Jacobi
     method is guaranteed to converge. */
  if (miid == 0) {
     GeneraMatriz(a, n, n);
     diag = 0;
     for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++)
           a[i][diag] += a[i][j];
        diag++;
     }
  }
  /* Broadcast the matrix row by row (rows are separate allocations). */
  for (i = 0; i < n; i++) MPI_Bcast(a[i], n, MPI_DOUBLE, 0, MPI_COMM_WORLD);
  MPI_Barrier(MPI_COMM_WORLD);

  /***************************************************************************
   homogeneous parallel run */

  /* Even block-row distribution: rank p owns rows despl[p] ..
     despl[p]+count[p]-1.  Fix: check the allocations before use. */
  despl = (int *) malloc(nprocs * sizeof *despl);
  count = (int *) malloc(nprocs * sizeof *count);
  if (despl == NULL || count == NULL) {
    fprintf(stderr, "Error: out of memory allocating distribution arrays\n");
    MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
  }
  despl[0] = 0;
  for (i = 0; i < nprocs; i++) {
     count[i] = nfilas;
     if (i) despl[i] = despl[i-1] + count[i-1];
  }

  MPI_Barrier(MPI_COMM_WORLD);
  CLOCK_Start(chrono);
  mpi_resi = jacobi_mpi(miid, a, nfilas, n, b, x, tol, maxit);

  CLOCK_End(chrono, mpi_time);
  MPI_Barrier(MPI_COMM_WORLD);
  /*CompruebaSolucion (a, x, b, n);*/

  /****************************************************************************
  ****************************************************************************/

  /* Per-rank report: "rank:elapsed_time:residual" */
  printf("%d:%g:%g\n", miid, mpi_time, mpi_resi);

  /**************************************************************************
   Memory release (fix: despl/count were previously leaked) */
  free(despl);
  free(count);
  LiberaMatriz(a);
  LiberaVector(b);
  LiberaVector(x);

  MPI_Finalize();

  return 0;
}
