#include <assert.h>
#include<mpi.h>
#include<stdlib.h>
#include <iostream>
#include <papi.h>
using namespace std;

#define INPUT_SIZE 80000
#define INPUT_RANGE 40000

typedef unsigned long int INDEX;

// Identity element for the tally reduction (a sum, so zero).
int init() {
  const int identity = 0;
  return identity;
}
// Fold one data value d into the running total t.
int accum(int t, int d) {
  int total = t;
  total += d;
  return total;
}
// Merge two partial tallies from sibling subtrees into one.
int combine(int left, int right) {
  int merged = left;
  merged += right;
  return merged;
}
// Scan projection: the emitted value is simply the running total;
// the second argument exists only to match the scan-operator signature.
int scan(int t, int not_used) {
  (void)not_used;  // deliberately ignored
  return t;
}

int main(int argc, char* argv[]){
  int np;
  int rank;
  int ierr;
  MPI_Status status;
  INDEX size;

  int* data;
  int* ldata;
  int* result;
  long long start;
  long long end;
  // MPI init
  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &np);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Generate input & init
  if(rank==0){
    // initialize PAPI library
    unsigned long chkflg = PAPI_VER_CURRENT;
    PAPI_library_init(chkflg);
    if (chkflg != PAPI_VER_CURRENT) {
      cout<<"Error PAPI Library out of date"<<endl;
      cout.flush();
      exit(1);
    }
    data = (int*) new int [INPUT_SIZE];
    result = (int*) new int [INPUT_SIZE];

    for(INDEX i=0;i<INPUT_SIZE;i++){
      data[i] = rand()%INPUT_RANGE;
      //data[i] = i%INPUT_RANGE;
    }
  }
  size = INPUT_SIZE/np;
  //timing 1
  MPI_Barrier(MPI_COMM_WORLD);
  if(rank==0){
    start=PAPI_get_real_usec();
  }
  /***** Calculate the histogram *****/
  int* RTally = new int[INPUT_RANGE];
  int* RTallyl = new int[INPUT_RANGE];
  int* histogram = new int[INPUT_RANGE];
  int stride = 1;
  ldata = new int[size];

  // Allocate memory for local process
  for(INDEX i=0; i<INPUT_RANGE; i++)
    RTally[i] = 0;

  // Distribute data for local process
  ierr = MPI_Scatter(data, size, MPI_INT,
      ldata, size, MPI_INT,
      0, MPI_COMM_WORLD);

  // Local accumulation
  for(INDEX i=0; i<size; i++) {
    RTally[ldata[i]]++;
  }

  // Moving up in the tree
  while(stride < np) {
    MPI_Barrier(MPI_COMM_WORLD);
    if (rank%(2*stride) == 0) {
      MPI_Recv(RTallyl, INPUT_RANGE, MPI_INT, rank+stride, 0,  MPI_COMM_WORLD, &status);
      // Combine
      for(INDEX i=0; i<INPUT_RANGE; i++)
        RTally[i] += RTallyl[i];
    }
    else if ((rank-stride)%(2*stride) == 0) {
      MPI_Send(RTally, INPUT_RANGE, MPI_INT, rank-stride, 0, MPI_COMM_WORLD);
    }
    stride = 2*stride;
  }

  MPI_Barrier(MPI_COMM_WORLD);
  // Reduce result
  if (rank == 0) {
    for(INDEX i=0; i<INPUT_RANGE; i++) {
      histogram[i] = RTally[i];
      //cout<<histogram[i]<<" ";
    }
    //cout<<endl;
  }

  // Clean up
  delete [] RTally;
  delete [] RTallyl;
  delete [] ldata;


  /***** Sort the array *****/

  int myTally;
  int lTally;
  int myTallyt;
  int pTally;
  stride = 1;

  size = INPUT_RANGE/np;

  // Allocate memory for local process
  ldata = (int*) new int [size];
  int* lresult = (int*) new int [size];
  int* starting_index = (int*) new int [INPUT_RANGE+1];
  starting_index[0]=0;
  MPI_Barrier(MPI_COMM_WORLD);
  // Distribute data for local process
  ierr = MPI_Scatter(histogram, size, MPI_INT,
      ldata, size, MPI_INT,
      0, MPI_COMM_WORLD);

  // Local process
  myTally = init();
  for(INDEX i=0; i<size; i++){
    myTally = accum(myTally, ldata[i]);
  }

  // Going up
  while(stride < np) {
    //lTally[rank+stride] = myTally[rank];
    //myTally[rank] = combine(myTally[rank], myTally[rank+stride]);
    MPI_Barrier(MPI_COMM_WORLD);
    if (rank%(2*stride) == 0) {
      MPI_Send(&myTally, 1, MPI_INT, rank+stride, 0, MPI_COMM_WORLD);
      MPI_Recv(&myTallyt, 1, MPI_INT, rank+stride, 0, MPI_COMM_WORLD, &status);
      myTally = combine(myTally, myTallyt);
    }
    if ((rank-stride)%(2*stride) == 0) {
      MPI_Recv(&lTally, 1, MPI_INT, rank-stride, 0, MPI_COMM_WORLD, &status);
      MPI_Send(&myTally, 1, MPI_INT, rank-stride, 0, MPI_COMM_WORLD);
    }
    stride = 2*stride;
  }

  if (rank == 0) {
    myTally = init();
  }

  // Propogate down
  stride = np/2;
  while (stride >= 1) {
    MPI_Barrier(MPI_COMM_WORLD);
    //myTally[rank+stride] = combine(myTally[threadid], lTally[rank+stride]);
    if (rank%(2*stride) == 0) {
      MPI_Send(&myTally, 1, MPI_INT, rank+stride, 0, MPI_COMM_WORLD);
    }
    if ((rank-stride)%(2*stride) == 0) {
      MPI_Recv(&myTallyt, 1, MPI_INT, rank-stride, 0, MPI_COMM_WORLD, &status);
      myTally = combine(myTallyt, lTally);
    }
    stride = stride/2;
  }

  MPI_Barrier(MPI_COMM_WORLD);
  pTally = myTally;
  for(INDEX i=0; i<size; i++) {
    pTally=accum(pTally, ldata[i]);
    lresult[i] = scan(pTally, ldata[i]);
  }
  if(rank==0){
    for(INDEX i=0; i<size; i++) {
      starting_index[i+1] = lresult[i];
    }
    for(int i=1; i<np;i++){
      MPI_Recv(starting_index+i*size+1, size, MPI_INT, i, 0, MPI_COMM_WORLD, &status);
    }
  }else{
    MPI_Send(lresult, size, MPI_INT, 0, 0, MPI_COMM_WORLD);
  }
  
  // Clean up
  delete [] ldata;
  delete [] lresult;
  if(rank==0){
    for(INDEX i=0;i<INPUT_SIZE;i++) {
      result[starting_index[data[i]]] = data[i];
      starting_index[data[i]]++;
    }
    //for(INDEX i=0;i<INPUT_SIZE;i++) {
    //  cout<<result[i]<<" ";
    //}
    //printf("\n");
  }
  //timing 2
  MPI_Barrier(MPI_COMM_WORLD);
  if(rank==0){
    end=PAPI_get_real_usec();
    cout<<"size "<<INPUT_SIZE<<" time "<<end-start<<" np "<<np<<endl;
    int* check_result = new int [INPUT_SIZE];
    for(INDEX i=0; i<INPUT_RANGE+1; i++)
      starting_index[i] = 0;
    for(INDEX i=0; i<INPUT_SIZE; i++)
      starting_index[data[i]+1]++;
    for(INDEX i=1; i<INPUT_RANGE; i++)
      starting_index[i] += starting_index[i-1];
    for(INDEX i=0; i<INPUT_SIZE; i++) {
      check_result[starting_index[data[i]]] = data[i];
      starting_index[data[i]]++;
    }
    for(INDEX i=0; i<INPUT_SIZE; i++) {
      if (check_result[i] != result[i]) {
        cout<<check_result[i]<<" "<<i<<" "<<result[i]<<endl;
        exit(-1);
      }
    }
    cout<<"Sort complete"<<endl;

  }
  MPI_Finalize();
  return 0;
}
