// mpiSorter.cpp
//
// This code has been adapted from our sorter.cpp file and
// an MPI tutorial found at http://www.lam-mpi.org/tutorials/one-step/ezstart.php
//
// modified by: Kourtni Marshall
//
// NOTE: this program is currently hanging when MPI_recv is called in the master function

#include "stdafx.h"
#include "cstdlib"
#include "iostream"
#include "vector"
#include "ctime"
#include "mpi.h"

#define WORKTAG 1
#define DIETAG 2

using namespace std;
/* Local functions */

static void master(void);
static void slave(void);
static long long get_next_work_item(void);
static void process_results(vector<int> result);
static vector<int> do_work(long long work);

//Global Vars
// nums_2_sort: total count of numbers to generate and sort
// nums_left:   numbers not yet handed out as work units
// work_size:   size of one work unit (numbers per slave task)
long long nums_2_sort, nums_left, work_size;
int ceiling; // number of distinct values / buckets: min(nums_2_sort, RAND_MAX)
vector<int> v; // master's merged histogram: v[i] == occurrences of value i

int main(int argc, char **argv)
{
  int myrank;
//	cout << strtod(argv[1], NULL) << " numbers were passed to sorter to be sorted by "
//		<< strtod(argv[2], NULL) << " processors." << endl;
//	nums_2_sort = nums_left = (long long)strtod(argv[1], NULL);
//	int p = (int)strtod(argv[2], NULL);
	nums_2_sort = nums_left = 1000; //nums_2_sort and p here are hard coded for debugging purposes
	int p = 2;
	nums_2_sort = (nums_2_sort < 0) ? 0 - nums_2_sort : nums_2_sort; //change sign if nums_2_sort < 0
	ceiling = (nums_2_sort <= RAND_MAX) ? (int)nums_2_sort : RAND_MAX;
	v.assign(ceiling, 0);
	//divide by processors
	work_size = nums_left / p;
	

  /* Initialize MPI */

  MPI_Init(&argc, &argv);

	int start, end; double elapsed;
	start = clock();

  /* Find out my identity in the default communicator */

  MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
  if (myrank == 0) {
    master();
  } else {
    slave();
  }

  end = clock(); elapsed = (double)(end - start)/CLOCKS_PER_SEC;
  printf ("It took %.4lf seconds to sort %.1d numbers.\n", elapsed, nums_2_sort); //print sort time
  //Print sorted output
	bool first = true;
	for(int i = 0; i < ceiling; ++i){
		while(v[i] != 0){
			if(first){cout << i; first = false;}
			else{cout << ", " << i;}
			--v[i];
		}
	}
  /* Shut down MPI */

  MPI_Finalize();
  return 0;
}


static void master(void)
{
  int ntasks, rank;
  long long work;
  vector<int> result;
  MPI_Status status;

  /* Find out how many processes there are in the default
     communicator */

  MPI_Comm_size(MPI_COMM_WORLD, &ntasks);

  /* Seed the slaves; send one unit of work to each slave. */

  for (rank = 1; rank < ntasks; ++rank) {

    /* Find the next item of work to do */

    work = get_next_work_item();

    /* Send it to each rank */

    MPI_Send(&work,             /* message buffer */
             1,                 /* one data item */
             MPI_INT,           /* data item is an integer */
             rank,              /* destination process rank */
             WORKTAG,           /* user chosen message tag */
             MPI_COMM_WORLD);   /* default communicator */
  }

  /* Loop over getting new work requests until there is no more work
     to be done */

  work = get_next_work_item();
  while (work != NULL) {

    /* Receive results from a slave */

    MPI_Recv(&result,           /* message buffer */
             1,                 /* one data item */
             MPI_DOUBLE,        /* of type double real */
             MPI_ANY_SOURCE,    /* receive from any sender */
             MPI_ANY_TAG,       /* any type of message */
             MPI_COMM_WORLD,    /* default communicator */
             &status);          /* info about the received message */

    /* Send the slave a new work unit */

    MPI_Send(&work,             /* message buffer */
             1,                 /* one data item */
             MPI_INT,           /* data item is an integer */
             status.MPI_SOURCE, /* to who we just received from */
             WORKTAG,           /* user chosen message tag */
             MPI_COMM_WORLD);   /* default communicator */

    /* Get the next unit of work to be done */

    work = get_next_work_item();
  }

  /* There's no more work to be done, so receive all the outstanding
     results from the slaves. */

  for (rank = 1; rank < ntasks; ++rank) {
    MPI_Recv(&result, 1, MPI_DOUBLE, MPI_ANY_SOURCE,
             MPI_ANY_TAG, MPI_COMM_WORLD, &status);
  }

  /* Tell all the slaves to exit by sending an empty message with the
     DIETAG. */

  for (rank = 1; rank < ntasks; ++rank) {
    MPI_Send(0, 0, MPI_INT, rank, DIETAG, MPI_COMM_WORLD);
  }
}


static void slave(void)
{
  long long work;
  vector<int> results;
  MPI_Status status;

  while (1) {

    /* Receive a message from the master */

    MPI_Recv(&work, 1, MPI_INT, 0, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);

    /* Check the tag of the received message. */

    if (status.MPI_TAG == DIETAG) {
      return;
    }

    /* Do the work */

    vector<int> result = do_work(work);

    /* Send the result back */

    MPI_Send(&result, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
  }
}


// Returns the size (count of numbers) of the next work unit, or 0 when
// all nums_left have been handed out. Decrements nums_left each call.
static long long get_next_work_item(void)
{
  if (nums_left == 0) { return 0; } // 0, not NULL: this returns long long

  if (nums_left > work_size) {
    nums_left -= work_size;
    return work_size;
  }

  /* Final (possibly short) unit. The original set work_size = nums_left
     but never zeroed nums_left, so this function could never reach the
     "no more work" case and the master looped forever. */
  long long last = nums_left;
  nums_left = 0;
  return last;
}


// Merge one slave's bucket counts into the master's global histogram v.
static void process_results(vector<int> result)
{
	for (int bucket = 0; bucket < ceiling; ++bucket) {
		v[bucket] += result[bucket];
	}
}


// Generate `work` random numbers in [0, ceiling) and tally them into a
// bucket histogram (the counting/bucket phase of the sort).
static vector<int> do_work(long long work)
{
	/* Seed only once per process. The original called srand(time(NULL))
	   on every invocation, so two work units processed within the same
	   second produced identical "random" sequences. */
	static bool seeded = false;
	if (!seeded) { srand((unsigned)time(NULL)); seeded = true; }

	vector<int> sub_v (ceiling, 0); long long i;
	for(i = 0; i < work; ++i){++sub_v[rand() % ceiling];} //count each value into its bucket
	return sub_v;
}
