/*
 *  wreka_MPI.c
 *  net_MPI
 *
 *  Created by Basile Clout on 28/06/07.
 *  Copyright 2007 Basile Clout. All rights reserved.
 * 
 * 	netMPI version for use with wrekamap.
 * 	Easily greppable output, statistics, and various improvements
 *
 */

#include "net_MPI2.h"

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>

/*
 * Print the command-line help text to stdout.
 *
 * Returns 1 so callers can use it directly as an exit/usage status.
 */
int usage() {

	printf("\n\n");
	printf("wreka_MPI uses the netMPI library to evaluate the MPI's latency and bandwidth in the parallel network.\n\n");
	printf("Usage: mpirun -np 4 --hostfile nodes wreka_MPI [OPTIONS]\n\n");
	printf("Examples:\n");
	printf("    mpirun -np 4 --hostfile lamhosts wreka_MPI --bsr -p network.log\n");
	printf("    # Describes the network using the bidirectional MPISend/MPIRecv method and print the output in network.log.\n\n");
	printf("Options:\n");
	printf("\t --sr\t\t Use Monodirectional MPI Send/Recv.\n");
	printf("\t --bsr\t\t Use Bidirectional MPI Send/Recv.\n");
	printf("\t --isr\t\t Use Monodirectional MPI ISend/IRecv.\n");
	printf("\t --bisr\t\t Use Bidirectional MPI ISend/IRecv.\n");
	printf("-m,\t --master \t\t Node number of the master node (DEFAULT: 0)\n");
	/* "switch" -> "shift": the value is used as `1 << s` */
	printf("-s,\t --size \t\t Bit left shift defining the size of the test (in bytes) (DEFAULT: 22)\n");
	/* option letter is -k, not -s */
	printf("-k,\t --skip \t\t Effectively skip the k first tests (DEFAULT: 3).\n");
	printf("-n,\t --tests \t\t Number of (effective) tests for one test size (DEFAULT: 10)\n");
	printf("-r,\t --requests \t\t Number of parallel requests for an Asynchronous MPI ISend/IRecv (DEFAULT: 16).\n");
	printf("-d, \t --debug	\t\t Print the complete list of measured values.\n");
	printf("-l \t --live \t\t Print the values as they are calculated.\n");
	printf("-p,\t --print \t\t Reroute the standard output.\n");
	printf("-h,\t --help \t\t Print this help\n");
	printf("\n\n");
	/* Example now matches the program name and the flags it actually shows. */
	printf("Example:\n wreka_MPI --isr -m 2 -k 2 -n 5 -s 21 -r 4 -p example.log\n");
	printf("\tPrint the bandwidth and latency values in the network between node 2 (-m 2) and the other nodes of the cluster. Use the monodirectional MPI ISend/IRecv method, for datagram sizes up to 1 << 21 (-s 21). For each test performed, average the value on 5 attempts (-n 5) and skip the 2 (-k 2) first values. If the test is a isr or bisr, use 4 parallel requests (-r 4).\n\n");
	printf("Output:\n ");
	printf("\t config: master rank, number of tasks, method (0=SR, 2=ISR), bandwidth test's size (bytes), latency (bytes) test's size, total # of tests,  # of tests skipped, # of parallel requests for ISR, help?, debug?, live?\n");
	printf("\t tasks: task rank, hostname, IP address (ipv4)\n");
	printf("\t results: task number, avg latency (ms), min lat, max lat, stdev lat, avg bandwidth (Mbits/s), min bdw, max bdw, stdev bdw\n");
	return 1;
}

/*
 * Entry point. Parses the options, initializes MPI, gathers every task's
 * hostname/IP on the master, builds a linked list of peer nodes and runs
 * the latency + bandwidth measurements against each of them, printing the
 * report (config / tasks / results sections) on the master.
 */
int main(int argc, char **argv) {

  /* Written through the long_options flag pointers; getopt_long requires
   * the target to have a stable address, hence static. */
  static int method = M_ISR;

  char *filename = NULL;          /* optional log file (-p or trailing arg) */
  char myname[LENGTH];            /* local hostname */
  struct hostent *host;
  struct in_addr *myip;           /* local IPv4 address */
  char strrank[LENGTH];           /* this task's rank, as text */
  char *rbuf;                     /* gather buffer: nb_tasks * 3 fields */
  char sendarray[3 * LENGTH];     /* rank | hostname | ip, LENGTH bytes each */

  int i;
  int g;
  int option_index = 0;

  struct configuration *conf = NULL;

  FILE *stream = stdout;          /* report destination, honours -p */

  /* DEFAULT VALUES */
  conf = malloc(sizeof *conf);
  if (conf == NULL) {
    fprintf(stderr, "Out of memory\n");
    exit(1);
  }
  conf->master = 0;
  conf->size = 1 << MAX_COEF_SIZE_T1;
  conf->skip = SKIP;
  conf->nb_tests = NB_TESTS;
  conf->isr_requests = NB_REQUESTS;
  conf->method = method;
  conf->fl_help = 0;
  conf->lat_size = 1;
  conf->head = NULL;
  conf->fl_debug = 0;
  conf->fl_live = 0;

  /* PARSE COMMAND ARGUMENTS */
  static struct option long_options[] = {
    /* Method selectors store straight into `method` via the flag pointer. */
    { "sr",       no_argument,       &method, 1 },  /* NOTE(review): assumes M_SR == 1 -- confirm in net_MPI2.h */
    { "bsr",      no_argument,       &method, M_BSR },
    { "isr",      no_argument,       &method, M_ISR },
    { "bisr",     no_argument,       &method, M_BISR },
    /* BUG FIX: --debug/--live previously used &method as the flag target,
     * so they overwrote the method (with 'd'/'l') instead of setting their
     * flags. They now return 'd'/'l' like the short options do. */
    { "debug",    no_argument,       0, 'd' },
    { "live",     no_argument,       0, 'l' },
    { "master",   required_argument, 0, 'm' },
    /* BUG FIX: --skip previously returned 's', so it set the test size
     * instead of the skip count; it now matches -k. */
    { "skip",     required_argument, 0, 'k' },
    { "size",     required_argument, 0, 's' },
    { "tests",    required_argument, 0, 'n' },
    { "requests", required_argument, 0, 'r' },
    /* BUG FIX: --help takes no argument. */
    { "help",     no_argument,       0, 'h' },
    { "print",    required_argument, 0, 'p' },
    { 0, 0, 0, 0 }
  };

  while (1) {

    g = getopt_long(argc, argv, "hdlm:s:k:n:r:p:", long_options,
                    &option_index);
    if (g == -1)
      break;

    switch (g) {

    case 0:
      /* Long option with a flag pointer: getopt_long already stored the
       * value, nothing left to do here. */
      if (long_options[option_index].flag == 0)
        fprintf(stderr, "Oooooops handling long options\n");
      break; /* BUG FIX: used to fall through into case 'm' -> atoi(NULL) */

    case 'm':
      conf->master = atoi(optarg);
      break;

    case 'k':
      conf->skip = atoi(optarg);
      break;

    case 's':
      conf->size = 1 << atoi(optarg);
      break;

    case 'n':
      conf->nb_tests = atoi(optarg);
      break;

    case 'r':
      conf->isr_requests = atoi(optarg);
      break;

    case 'h':
      conf->fl_help = 1;
      break;

    case 'd':
      conf->fl_debug = 1;
      break;

    case 'l':
      conf->fl_live = 1;
      break;

    case 'p':
      filename = optarg;
      stream = fopen(filename, "a");
      if (stream == NULL) {
        /* BUG FIX: fall back to stdout instead of keeping a NULL stream */
        fprintf(stderr, "Impossible to open %s\n", filename);
        stream = stdout;
      }
      break;

    case '?':
    default:
      break; /* Error in the given arguments */
    }
  }

  /* A bare trailing argument is also accepted as the log file. */
  if (optind < argc && filename == NULL) {
    filename = argv[optind];
    stream = fopen(filename, "a");
    if (stream == NULL) {
      fprintf(stderr, "Impossible to open %s\n", filename);
      stream = stdout;
    }
  }

  MPI_Init(&argc, &argv);

  MPI_Comm_rank(MPI_COMM_WORLD, &(conf->myrank));
  MPI_Comm_size(MPI_COMM_WORLD, &(conf->nb_tasks));
  conf->method = method;

  /* Change error handler so MPI calls return errors instead of aborting.
   * (MPI_Errhandler_set is deprecated; kept for the MPI-1 era this targets.) */
  MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

  if (conf->myrank == conf->master && conf->fl_help)
    usage();

  /* Greetings: resolve our own hostname and IPv4 address. */
  gethostname(myname, sizeof(myname));
  if ((host = gethostbyname(myname)) == NULL) {
    herror("Error with gethostbyname(myname): ");
    exit(1);
  }
  myip = (struct in_addr *) host->h_addr; /* local ip */

  if ((host = gethostbyaddr(myip, sizeof(*myip), AF_INET)) == NULL) {
    herror("Error with gethostbyaddr");
    exit(1);
  }

  rbuf = malloc((size_t) conf->nb_tasks * 3 * LENGTH);
  if (rbuf == NULL) {
    fprintf(stderr, "Out of memory\n");
    exit(1);
  }
  snprintf(strrank, sizeof strrank, "%d", conf->myrank);

  /* Pack rank / hostname / ip into three LENGTH-wide, zero-padded fields.
   * BUG FIX: strncpy could leave a field without a NUL terminator when the
   * source filled it exactly; memset + snprintf always terminates. */
  memset(sendarray, 0, sizeof sendarray);
  snprintf(sendarray, LENGTH, "%s", strrank);
  snprintf(sendarray + LENGTH, LENGTH, "%s", host->h_name);
  snprintf(sendarray + 2 * LENGTH, LENGTH, "%s", inet_ntoa(*myip));
  MPI_Gather(sendarray, LENGTH * 3, MPI_CHAR, rbuf, LENGTH * 3, MPI_CHAR,
             conf->master, MPI_COMM_WORLD);

  if (conf->myrank == conf->master) {
    /* BUG FIX: the report now honours -p by writing to `stream`;
     * previously `stream` was opened but never used. */
    fprintf(stream, "config\n %d %d %d %d %d %d %d %d %d %d %d\n",
            conf->master, conf->nb_tasks, conf->method, conf->size,
            conf->lat_size, conf->nb_tests, conf->skip, conf->isr_requests,
            conf->fl_help, conf->fl_debug, conf->fl_live);
    fprintf(stream, "tasks\n");
    for (i = 0; i < conf->nb_tasks; i++)
      fprintf(stream, "%s %s %s\n",
              rbuf + (LENGTH * 3 * i),
              rbuf + (LENGTH * 3 * i) + LENGTH,
              rbuf + (LENGTH * 3 * i) + 2 * LENGTH);
  }

  /* Build the nodes list: one entry per task except the master itself. */
  struct node *first = malloc(sizeof *first);
  struct node *tail = first;
  struct node *_node = NULL;
  if (first == NULL) {
    fprintf(stderr, "Out of memory\n");
    exit(1);
  }
  conf->head = first;

  first->master = conf->master;
  first->rank = (conf->master == 0) ? 1 : 0;
  first->lat = NULL;
  first->bdw = NULL;
  first->next = NULL; /* list must be NULL-terminated (2-node clusters!) */

  for (i = 0; i < conf->nb_tasks; i++) {

    if (i == conf->master || i == first->rank)
      continue;
    _node = malloc(sizeof *_node);
    if (_node == NULL) {
      fprintf(stderr, "Out of memory\n");
      exit(1);
    }
    _node->master = conf->master;
    _node->rank = i;
    _node->lat = NULL;
    _node->bdw = NULL;
    _node->next = NULL;
    tail->next = _node;
    tail = _node;
  }

  MPI_Barrier(MPI_COMM_WORLD);

  /* Get the latency and bandwidth stats against every node in the list.
   * Master runs the *_master measurement; every other task runs the
   * matching worker side. */
  if (conf->myrank == conf->master)
    fprintf(stream, "results\n");

  struct node *cur = conf->head;
  double *arr_dummy = NULL;
  struct latency *lat = NULL;
  struct bandwidth *bdw = NULL;

  do {

    if (conf->myrank == conf->master && conf->fl_live == 1)
      fprintf(stream, "%d %d ", cur->master, cur->rank);

    if (conf->myrank == conf->master) {
      lat = malloc(sizeof *lat);
      if (lat == NULL || get_latency_master(conf, cur, lat) != 1) {
        fprintf(stderr, "Error when determining latency\n");
        MPI_Finalize();
        exit(1);
      }
      cur->lat = lat;
      if (conf->fl_live == 1)
        fprintf(stream, "%.2f %.2f %.2f %.2f ",
                lat->avg, lat->min, lat->max, lat->stdev);
    } else {
      net_MPI_lat_bsr(conf, cur, arr_dummy);
    }

    if (conf->myrank == conf->master) {
      bdw = malloc(sizeof *bdw);
      if (bdw == NULL || get_bandwidth_master(conf, cur, bdw) != 1) {
        fprintf(stderr, "Error when determining bandwidth\n");
        MPI_Finalize(); /* BUG FIX: finalize before exiting, as the latency path does */
        exit(1);
      }
      cur->bdw = bdw;
      if (conf->fl_live == 1)
        fprintf(stream, "%.2f %.2f %.2f %.2f\n",
                bdw->avg, bdw->min, bdw->max, bdw->stdev);
    } else {
      net_MPI_bdw_isr(conf, cur, arr_dummy);
    }
    cur = cur->next;

  } while (cur != NULL);

  MPI_Barrier(MPI_COMM_WORLD);

  /* Print results (unless they were already streamed live). */
  if (conf->master == conf->myrank && conf->fl_live == 0) {

    for (cur = conf->head; cur != NULL; cur = cur->next) {
      fprintf(stream, "%d %d ", cur->master, cur->rank);

      fprintf(stream, "%.2f %.2f %.2f %.2f ",
              cur->lat->avg, cur->lat->min, cur->lat->max, cur->lat->stdev);
      if (conf->fl_debug == 1) {
        fprintf(stream, "[");
        for (i = 0; i < cur->lat->len; i++)
          fprintf(stream, "%.2f ", cur->lat->array[i]);
        fprintf(stream, "] ");
      }

      fprintf(stream, "%.2f %.2f %.2f %.2f ",
              cur->bdw->avg, cur->bdw->min, cur->bdw->max, cur->bdw->stdev);
      if (conf->fl_debug == 1) {
        fprintf(stream, "[");
        for (i = 0; i < cur->bdw->len; i++)
          fprintf(stream, "%.2f ", cur->bdw->array[i]);
        fprintf(stream, "]");
      }
      fprintf(stream, "\n");
    }
  }

  /* BUG FIX: flush and close the -p log file before exiting. */
  if (stream != stdout)
    fclose(stream);

  MPI_Finalize();
  free(rbuf);
  /* conf and the node list live until process exit; freed by the OS. */
  return 0;
}

