#include <mpi.h>
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <vector>
#include <list>
#include <cmath>
#include <cstring>
#include <algorithm>
#include "es-bvs-bl-common.h"
#include <omp.h>

//
//  Altered benchmarking program for O(n/p) algorithm
//


/*
 * migrate -- redistribute particles between MPI ranks after a time step.
 *
 * NOTE(review): this function is called from *inside* an active
 * `#pragma omp parallel` region in main(); every `omp for` / `omp barrier`
 * / `omp master` below is an orphaned directive executed by that team.
 * It must never be called outside a parallel region with >1 configured
 * thread, or the worksharing/barrier semantics change.
 *
 * Parameters:
 *   local      - this rank's particles; on return holds the particles that
 *                stay on this rank followed by those received from peers.
 *   dx, dy     - width/height of one rank's spatial box.
 *   nx, ny     - processor-grid dimensions (nx*ny == nproc).
 *   nproc,rank - MPI communicator size and this rank's id.
 *   nlocal     - number of particles currently in `local`.
 *   PARTICLE   - committed MPI datatype describing one particle_t.
 *   mpiBins    - staging buffers: nx*ny destination bins replicated once per
 *                OpenMP thread (array length nx*ny*threads); thread t uses
 *                slice [t*nx*ny, (t+1)*nx*ny).
 *   counters   - per-(bin,thread) particle counts, same layout as mpiBins.
 *   offsets    - output: receive offsets into `local`, one slot per peer.
 */
void migrate(std::vector<particle_t>& local, double dx, double dy, 
	     int nx, int ny, int nproc, int rank, int nlocal,
	     MPI_Datatype& PARTICLE, std::vector<particle_t> *mpiBins,
	     int NUM_THREADS_X, int NUM_THREADS_Y, std::vector<int> &counters,
	     std::vector<int> &offsets)
{
    TIME("migrate");
    /* Phase 1: clear every per-thread staging bin and count from last step. */
    #pragma omp for
    for(int i=0; i<nx*ny*NUM_THREADS_X*NUM_THREADS_Y; i++) {
      mpiBins[i].clear();
      counters[i] = 0;
    }
    #pragma omp barrier	

  
    int i,j,k; 
    int tid = omp_get_thread_num();
    /* Pre-size each thread's "stay here" bin (destination == own rank):
       most particles do not leave the rank in a single step. */
    #pragma omp for
    for (i = 0; i < NUM_THREADS_X*NUM_THREADS_Y;i++){
      mpiBins[rank+nx*ny*i].reserve(nlocal);
    }
    #pragma omp barrier	
    { 
      TIME("bin sort");
//      #pragma omp parallel private(tid)
      {
      /* Phase 2: sort each local particle into the per-thread bin of the
         rank that owns its position; additionally replicate it into any
         neighbouring rank's bin whose box it approaches within `cutoff`
         (ghost particles needed for force computation across boundaries). */
      #pragma omp for //private(j,k) schedule(static)
      for(i = 0; i<nlocal; i++)
      {
	  particle_t& p = local[i];
	  int ii = p.x/dx;
	  int jj = p.y/dy;
	  int index=jj*nx + ii;   /* rank that owns p's position */
	  counters[index + tid*nx*ny]++;
	  /*yes, actually pushing particles in, not pointers, need arrays for gather*/
	  mpiBins[index+tid*nx*ny].push_back(p);
	  /* See if this particle is in a ghost region: probe the 3x3 set of
	     positions offset by +/-cutoff and bin toward any distinct rank. */
	  for (j = -1; j<= 1; j++){
	    int iii = (p.x+j*cutoff)/dx;
	    if (iii < 0 || iii >= nx)
	      continue;
	    for (k = -1; k<= 1; k++){
	      int jjj = (p.y+k*cutoff)/dy;
	      if (jjj < 0 || jjj >= ny)
		continue;
	      int index_ghost = jjj*nx + iii;
	      /* Make sure we didnt already add to this bin -- bins are
	         append-only, so comparing against the last entry's globalID
	         is enough to dedupe repeats from this (j,k) probe loop. */
	      if (index_ghost != index){
		if (mpiBins[index_ghost + tid*nx*ny].empty() ||
		    mpiBins[index_ghost + tid*nx*ny].back().globalID != p.globalID) 
		  {
		    counters[index_ghost + tid*nx*ny]++;
		    mpiBins[index_ghost + tid*nx*ny].push_back(p);
		  }
	      }
	    }
	  }
	}
	#pragma omp barrier	
	/* Phase 3: for each *remote* destination, concatenate the per-thread
	   bins into the base slice (index < nx*ny) so every peer gets one
	   contiguous send buffer.  memcpy assumes particle_t is trivially
	   copyable (it is sent as raw doubles over MPI). */
	#pragma omp for schedule(static)
	for (i = 0; i < nx*ny; i++){
	  if (i != rank) {
	    int offset = counters[i];
	    for (j = 1; j < NUM_THREADS_X*NUM_THREADS_Y; j++){
	      counters[i] += counters[i + j*nx*ny];	    
	    }
	    
	    mpiBins[i].resize(counters[i]);
	    for (j = 1; j < NUM_THREADS_X*NUM_THREADS_Y; j++){
	      memcpy(&(mpiBins[i][offset]), &(mpiBins[i+j*nx*ny][0]), 
		     counters[i + j*nx*ny]*sizeof(particle_t));
	      offset += counters[i + j*nx*ny];
      
	    }/*
	    for (j = 1; j < NUM_THREADS_X*NUM_THREADS_Y; j++){
	      for (std::vector<particle_t>::iterator it = mpiBins[i+j*nx*ny].begin();
		   it != mpiBins[i+j*nx*ny].end();
		   it++){
		mpiBins[i].push_back(*it);
	      }
	    }*/
	  }	    
	}
	int total_stay;
	#pragma omp barrier	
	/* Phase 4: master turns the per-thread "stay" counts into an
	   inclusive prefix sum (stored back into counters[rank + j*nx*ny])
	   so each thread can copy its staying particles into a disjoint
	   slice of `local` below. */
	#pragma omp master
	{
	  total_stay = counters[rank];
	  for (j = 1; j < NUM_THREADS_X*NUM_THREADS_Y; j++){
	    total_stay += counters[rank + j*nx*ny];
	    
	    counters[rank + j*nx*ny] += counters[rank + (j-1)*nx*ny];	    
	  }
	  local.resize(total_stay);
	}
	#pragma omp barrier

	/* Each thread writes its staying particles at its prefix-sum offset. */
	if (tid == 0)
	  memcpy(&(local[0]), &(mpiBins[rank][0]), 
		 counters[rank]*sizeof(particle_t));
	else 
	  memcpy(&(local[counters[rank+(tid-1)*nx*ny]]), &(mpiBins[rank+tid*nx*ny][0]), 
		 (counters[rank + tid*nx*ny]-counters[rank + (tid-1)*nx*ny])*sizeof(particle_t));
	#pragma omp barrier
	/* counters[rank] now doubles as "particles kept locally". */
	#pragma omp master 
	{
	  counters[rank] = total_stay;
	}
	
	}
    }
    #pragma omp barrier	
    /* Phase 5 (master only): exchange send histograms so every rank learns
       how many particles it will receive from each peer, then size `local`
       to hold stayers plus all incoming particles. */
    #pragma omp master
    {
      std::vector<int> allCounters(nx*ny*nproc);

    {
      TIME("Barrier before allgather");
      MPI_Barrier(MPI_COMM_WORLD);
    }
    {
      TIME("allgather");
      MPI_Allgather(&(counters[0]), nx*ny, MPI_INT, &(allCounters[0]), 
		    nx*ny, MPI_INT, MPI_COMM_WORLD);
    }
    /* offsets[i] = index in `local` where rank i's particles land; slot 0
       starts right after the particles that stayed on this rank. */
    offsets.clear(); 
    offsets.resize(1,allCounters[rank+rank*nx*ny]);
    // OK, we have the global histograms for all processors, on all processors.  
    // After 1000 processors this won't scale well, but a random
    // initial global distribution is also looking hard at that scale.
    {
      TIME("build counters");
      nlocal = 0;

      for(i=0; i<nproc; i++)
	{
	  int index = rank + i*nx*ny;
	  nlocal+= allCounters[index];
	  if (i!= rank){
	    offsets.push_back(offsets.back()+allCounters[index]);
	  }
	  else 
	    offsets.push_back(offsets.back());
	
	}
#ifdef DEBUG
      if(rank==0)
	{
	  printf("[%d] nlocal = %d   ", rank, nlocal);
	  for(i=0; i<offsets.size(); i++)
	    printf("%d ",offsets[i]);
	  fflush(stdout);
	}
#endif
    }
    local.resize(nlocal);
    }
    #pragma omp barrier	
    
    int totalS=0, totalR=0;
    std::vector<MPI_Status>  Status;
    /* Requests is thread-private (stack of a function called inside the
       parallel region): each thread waits only on the requests it posted.
       Multiple threads call MPI concurrently here, hence the
       MPI_THREAD_MULTIPLE check in main(). */
    std::list<MPI_Request> Requests;//[NUM_THREADS_X*NUM_THREADS_Y];
//in parallel again
    {
      TIME("Isend+Irecv");

      /* Phase 6: post nonblocking sends/receives.  Peer order starts at our
         own rank (n = rank .. rank+nproc-1 mod nproc) to stagger traffic. */
      #pragma omp for //reduction(+:totalS, totalR)
      for(int n=rank; n<nproc+rank; n++)
	{
	  int other = n%nproc;

	  int numSend = counters[other];
	  if(numSend > 0)
	    {
	      if(rank!= other)
		{
		  MPI_Request R;
		  Requests.push_back(R);
		  MPI_Isend(&(mpiBins[other][0]), numSend, PARTICLE, other,
			    0, MPI_COMM_WORLD, &(Requests.back()));
		  totalS += numSend;
		}
	    }
	  int numRecv = offsets[other+1]-offsets[other];
	  if(numRecv > 0 && rank!= other)
	    {
	      MPI_Request R;
	      Requests.push_back(R);
	      /* Receive straight into the precomputed slice of `local`. */
	      MPI_Irecv(&(local[offsets[other]]), numRecv, PARTICLE, 
			other, 0, MPI_COMM_WORLD, &(Requests.back()));
	      totalR+= numRecv;
	    }
	}
#ifdef DEBUG
    printf("rank=%d total send =%d, total recv=%d\n", rank, totalS, totalR);
#endif
    {
      TIME("Waitall");
      /* MPI_Waitall needs a contiguous array; flatten this thread's list. */
      std::vector<MPI_Request> r(Requests.size());
      int i=0;
      for(std::list<MPI_Request>::iterator 
	    it=Requests.begin(); 
	  it!= Requests.end(); 
	  ++it, i++) 
	r[i] = *it;
      
      std::vector<MPI_Status> stats(r.size());
      
      int result = MPI_Waitall(r.size(), &(r[0]), &(stats[0]));
      if(result != MPI_SUCCESS)
	{
	  printf("Failture on migrate::Waitall  rank=%d \n", rank);
	  MPI_Abort(MPI_COMM_WORLD, result);
	}
    }
    MPI_Barrier(MPI_COMM_WORLD);
  }
}
/*
 * Driver for the hybrid MPI+OpenMP O(n/p) particle benchmark.
 *
 * Decomposes the domain into an nx*ny grid of boxes, one per MPI rank,
 * and within each rank into per-thread tiles of fine-grained bins.  Each
 * time step: migrate particles between ranks, bin locally, compute
 * short-range forces, drop particles that left the box, move, and
 * (optionally) gather + save the global state.
 */
int main( int argc, char **argv )
{    
    //
    //  process command line parameters
    //
    if( find_option( argc, argv, "-h" ) >= 0 )
    {
        printf( "Options:\n" );
        printf( "-h to see this help\n" );
        printf( "-n <int> to set the number of particles\n" );
        printf( "-o <filename> to specify the output file name\n" );
	printf( "-px <int> -py <int> to set the number of pthreads\n"); 
        return 0;
    }
    
    int n = read_int( argc, argv, "-n", 1000 );
    /* The OpenMP team is a NUM_THREADS_X x NUM_THREADS_Y logical grid. */
    int NUM_THREADS_X = read_int( argc, argv, "-px", 1 );
    int NUM_THREADS_Y = read_int( argc, argv, "-py", 1 );
    omp_set_num_threads(NUM_THREADS_X*NUM_THREADS_Y);
    char *savename = read_string( argc, argv, "-o", NULL );
    
    //
    //  set up MPI.  migrate() issues MPI calls from multiple OpenMP threads
    //  concurrently, so full MPI_THREAD_MULTIPLE support is mandatory.
    //
    int n_proc, rank;
    int provided;
    MPI_Init_thread( &argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    MPI_Comm_size( MPI_COMM_WORLD, &n_proc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    if (provided != MPI_THREAD_MULTIPLE){
      if (rank == 0)
	printf("ERROR: threaded MPI not supported\n");
      MPI_Abort(MPI_COMM_WORLD,0);
    }
#ifdef  TAU
    TAU_PROFILE("main", "int (int, char **)", TAU_DEFAULT);
    TAU_PROFILE_INIT(argc, argv);
    TAU_PROFILE_SET_NODE(rank);
    TAU_PROFILE_SET_CONTEXT(0);
#endif
 
    //
    //  allocate generic resources
    //
    bool bsave = savename;
    FILE *fsave = savename && rank == 0 ? fopen( savename, "w" ) : NULL;
    particle_t *particles = (particle_t*) malloc( n * sizeof(particle_t) );
    
    MPI_Datatype PARTICLE;
    //MPI_Type_contiguous( 6, MPI_DOUBLE, &PARTICLE );
    /* 7 doubles per particle (was 6) -- presumably the extra field is
       globalID, used by migrate()'s dedup check; confirm the particle_t
       layout in es-bvs-bl-common.h if the type is ever changed. */
    MPI_Type_contiguous(7, MPI_DOUBLE, &PARTICLE);
    MPI_Type_commit( &PARTICLE );
    
    //
    //  set up the (initial, block-cyclic) data partitioning across processors
    //
    int particle_per_proc = (n + n_proc - 1) / n_proc;
    int *partition_offsets = (int*) malloc( (n_proc+1) * sizeof(int) );
    for( int i = 0; i < n_proc+1; i++ )
        partition_offsets[i] = min( i * particle_per_proc, n );
    
    int *partition_sizes = (int*) malloc( n_proc * sizeof(int) );
    for( int i = 0; i < n_proc; i++ )
        partition_sizes[i] = partition_offsets[i+1] - partition_offsets[i];
    
    //
    //  allocate storage for local partition -- double-buffered: local/local2
    //  are swapped via local_ptr/local_ptr2 each step when compacting out
    //  particles that left this rank's box.
    //
    int nlocal = partition_sizes[rank];
    //particle_t *local = (particle_t*) malloc( nlocal * sizeof(particle_t) );
    std::vector<particle_t> local((int)nlocal);
    std::vector<particle_t> local2;
    std::vector<particle_t> * local_ptr = &local;
    std::vector<particle_t> * local_ptr2 = &local2;
    std::vector<particle_t> * local_ptr_swp;
    /* Headroom for particles migrating in, to reduce reallocations. */
    local.reserve(nlocal*1.5);
    local2.reserve(nlocal*1.5);
    //
    //  initialize and distribute the particles (that's fine to leave it unoptimized)
    //
    double grid_size;
    set_size( n, grid_size );
    if( rank == 0 )
        init_particles( n, grid_size, particles );
    MPI_Scatterv( particles, partition_sizes, partition_offsets, 
		  PARTICLE, &(local[0]), nlocal, PARTICLE, 0, MPI_COMM_WORLD );
    
    //
    //  simulate a number of time steps
    //
    double simulation_time = read_timer( );
    //
    //  Code up to here is unchanged.  We start our algorithm from this point onwards.
    //
    //  Step one, break whole domain in large boxes for each MPI task.

    /* Processor grid: nx = floor(sqrt(n_proc)); the assert requires n_proc
       to factor exactly as nx*ny (e.g. perfect squares always work). */
    const int nx = sqrt((float)n_proc);
    const int ny=n_proc/nx;
    assert(nx*ny==n_proc);

    const int my_y = rank/nx;
    const int my_x = rank%nx;

    //  yeah, this leaves some processors hanging out there at the end, 
    //  we can make fancier integer logic here.
    double dx=grid_size/nx;
    double dy=grid_size/ny;
    
    /* Fine bins of cell_size cover this rank's box plus a one-cell ghost
       border on each side (hence the +3*cell_size and -cell_size offsets). */
    const double cell_size = MIN_CELL_SIZE;
    const int bins_dim_x = (int)((dx+3*cell_size)/cell_size);
    const int bins_dim_y = (int)((dy+3*cell_size)/cell_size);
    const int bins_thread_size_x = bins_dim_x / NUM_THREADS_X 
				    + (bins_dim_x % NUM_THREADS_X > 0 ? 1 : 0);
    const int bins_thread_size_y = bins_dim_y / NUM_THREADS_Y 
				    + (bins_dim_y % NUM_THREADS_Y > 0 ? 1 : 0);

    const double grid_offset_x = my_x*dx - cell_size;;
    const double grid_offset_y = my_y*dy - cell_size;

    /* NOTE(review): runtime-sized array is a compiler extension (not
       standard C++), and the new'd vectors are never deleted -- they are
       reclaimed at process exit, consistent with the commented-out frees
       at the bottom of main(). */
    int numBins = bins_dim_x*bins_dim_y;
    std::vector< particle_t* >* bins[numBins];
    for(int i=0; i<numBins; i++) 
      {
	bins[i] = new std::vector<particle_t*>();
      }

    /* Flat-index offsets of the 3x3 bin neighborhood (row-major). */
    int interact_neighbors[9] = {(-bins_dim_x-1), (-bins_dim_x), (-bins_dim_x+1),
				 -1, 0, 1,
				 (bins_dim_x-1), (bins_dim_x), (bins_dim_x+1)};
    int bin_x, bin_y, bin_num, new_nlocal;
    int * stay_counts = (int*)malloc(NUM_THREADS_X*NUM_THREADS_Y*sizeof(int)); 
    std::vector< particle_t* > *inter_bin;
    std::vector< particle_t* >::iterator main_part, inter_part;
    std::vector<int> counters(nx*ny*NUM_THREADS_X*NUM_THREADS_Y,0);
    std::vector<int> offsets;

    /* Per-(destination rank, thread) staging bins used by migrate(). */
    std::vector<particle_t> mpiBins[nx*ny*NUM_THREADS_X*NUM_THREADS_Y];
    for(int i=0; i<nx*ny*NUM_THREADS_X*NUM_THREADS_Y; i++) mpiBins[i].reserve(100);
 
    {
      TIME("main loop");
    //#pragma omp parallel private(bin_x, bin_y, bin_num)
    /* One persistent parallel region spans all NSTEPS iterations; migrate()
       and every worksharing construct below execute inside this team. */
    #pragma omp parallel num_threads(NUM_THREADS_X*NUM_THREADS_Y) private(bin_x, bin_y, bin_num) shared(nlocal, counters)
    {
      int tid = omp_get_thread_num();
      int thread_x = tid % NUM_THREADS_X;
      int thread_y = tid / NUM_THREADS_X;

    for( int step = 0; step < NSTEPS; step++ )
    {
      #pragma omp master
      {
	nlocal = local_ptr->size();
      }
      #pragma omp barrier
      /* Phase A: exchange particles (and ghosts) with neighbor ranks. */
      if (n_proc > 1)
	migrate(*local_ptr, dx, dy, nx, ny, n_proc, rank, nlocal, PARTICLE, mpiBins, 
		NUM_THREADS_X, NUM_THREADS_Y, counters, offsets);
      #pragma omp barrier
      #pragma omp master
      {
	nlocal = local_ptr->size();
      }
      #pragma omp barrier
      
      //
      //  Phase B: bin particles into the fine grid (pointers, not copies)
      //
      if (0)   /* disabled serial reference implementation */
      {
      #pragma omp master 
      {
      for( int i = 0; i < nlocal; i++ )
      {
	bin_x = (int)(((*local_ptr)[i].x-grid_offset_x)/cell_size);
	bin_y = (int)(((*local_ptr)[i].y-grid_offset_y)/cell_size);
	bin_num = bin_y*bins_dim_x+bin_x;
	bins[bin_num]->push_back(&(*local_ptr)[i]);
	/*if( (bin_x >= thread_x*bins_thread_size_x) && 
	    (bin_x < (thread_x+1)*bins_thread_size_x) && 
	    (bin_y >= thread_y*bins_thread_size_y) && 
	    (bin_y < (thread_y+1)*bins_thread_size_y) ) {
	}*/
      }
      }
      } else {
      TIME("local binning");
      /* Every thread scans ALL particles but keeps only those landing in
         its own tile of bins, so no two threads write the same bin.  The
         scan is split into two loops starting at a tid-dependent offset so
         threads begin in different parts of the array (cache staggering). */
      for( int i = tid*nlocal/(NUM_THREADS_X*NUM_THREADS_Y); i < nlocal; i++ )
      {
	bin_x = (int)(((*local_ptr)[i].x-grid_offset_x)/cell_size);
	bin_y = (int)(((*local_ptr)[i].y-grid_offset_y)/cell_size);
	if( (bin_x >= thread_x*bins_thread_size_x) && 
	    (bin_x < (thread_x+1)*bins_thread_size_x) && 
	    (bin_y >= thread_y*bins_thread_size_y) && 
	    (bin_y < (thread_y+1)*bins_thread_size_y) ) {
	  bin_num = bin_y*bins_dim_x+bin_x;
	  bins[bin_num]->push_back(&(*local_ptr)[i]);
	}
      }
      for( int i = 0; i< tid*nlocal/(NUM_THREADS_X*NUM_THREADS_Y); i++ )
      {
	bin_x = (int)(((*local_ptr)[i].x-grid_offset_x)/cell_size);
	bin_y = (int)(((*local_ptr)[i].y-grid_offset_y)/cell_size);
	if( (bin_x >= thread_x*bins_thread_size_x) && 
	    (bin_x < (thread_x+1)*bins_thread_size_x) && 
	    (bin_y >= thread_y*bins_thread_size_y) && 
	    (bin_y < (thread_y+1)*bins_thread_size_y) ) {
	  bin_num = bin_y*bins_dim_x+bin_x;
	  bins[bin_num]->push_back(&(*local_ptr)[i]);
	}
      }
      }
      #pragma omp barrier

      //MPI_Barrier(MPI_COMM_WORLD);
      //
      //  Phase C: compute all forces from the 3x3 bin neighborhood
      //
      {
	TIME("apply_force loop");
      #pragma omp for private(inter_bin, inter_part, main_part)
      for( int i = 0; i < bins_dim_x*bins_dim_y; i++ )
      {
	for ( main_part=bins[i]->begin(); 
	      main_part<bins[i]->end(); 
	      main_part++ )
	{
	  (*main_part)->ax = 0;
	  (*main_part)->ay = 0;
	  for ( int bin_to = 0; bin_to < 9; bin_to++ )
	  {
	    // Edgar - if bin is on edge this will attempt redundant bin interactions
	    if (i + interact_neighbors[bin_to] < 0 ||
		i + interact_neighbors[bin_to] >= bins_dim_x*bins_dim_y )
	      continue;
	    inter_bin = bins[i + interact_neighbors[bin_to]];
	    for ( inter_part=inter_bin->begin(); 
		  inter_part<inter_bin->end(); 
		  inter_part++ )
	    {
	      apply_force( **main_part, **inter_part );
	    }
	  }
	}
      }
      }
      #pragma omp barrier
      /* Phase D: drop ghost/emigrated particles -- compact the particles
         still inside this rank's box into the second buffer, then swap. */
      if (n_proc > 1){ 
	int maxn;
	/* Last thread takes the remainder of the division. */
	if (tid==NUM_THREADS_X*NUM_THREADS_Y-1) 
	  maxn = nlocal;
	else
	  maxn = (tid+1)*(nlocal/(NUM_THREADS_X*NUM_THREADS_Y));
	assert(maxn<=nlocal);
	stay_counts[tid]=0;
	#pragma omp barrier
	/* First pass: count stayers in this thread's slice. */
	for( int i = tid*(nlocal/(NUM_THREADS_X*NUM_THREADS_Y)); 
		 i < maxn; 
		 i++ ){
	  if ((*local_ptr)[i].x < my_x*dx || (*local_ptr)[i].x >= (my_x+1)*dx ||
	      (*local_ptr)[i].y < my_y*dy || (*local_ptr)[i].y >= (my_y+1)*dy){
	    continue;
	  } else {
	    stay_counts[tid]++;
	  }
	}
	#pragma omp barrier
	/* Master converts counts to an inclusive prefix sum -> offsets. */
	#pragma omp master
	{
	  for (int i = 1; i < NUM_THREADS_X*NUM_THREADS_Y; i++){
	    stay_counts[i] += stay_counts[i-1];
	  }
	  local_ptr2->resize(stay_counts[NUM_THREADS_X*NUM_THREADS_Y-1]);
	}
	#pragma omp barrier
	int my_offset = (tid == 0) ? 0 : stay_counts[tid-1];
	int count_stay = 0;
	/* Second pass: copy stayers into this thread's slice of buffer 2. */
	for( int i = tid*(nlocal/(NUM_THREADS_X*NUM_THREADS_Y)); 
		 i < maxn; 
		 i++ ){
	  if ((*local_ptr)[i].x < my_x*dx || (*local_ptr)[i].x >= (my_x+1)*dx ||
	      (*local_ptr)[i].y < my_y*dy || (*local_ptr)[i].y >= (my_y+1)*dy){
	    continue;
	  } else {
	    (*local_ptr2)[my_offset+count_stay] = (*local_ptr)[i];
	    count_stay++;
	  }
	}
  #ifdef DEBUG
	if (count_stay != stay_counts[tid]-my_offset){
	  printf("[%d][%d], new %d != %d old\n",rank,tid,count_stay,stay_counts[tid]-my_offset);
	  assert(0);
	}
  #endif
	#pragma omp barrier
	/* Swap the double buffer; `local_ptr` now holds only stayers. */
	#pragma omp master
	{
	  local_ptr_swp = local_ptr;
	  local_ptr = local_ptr2;
	  local_ptr2 = local_ptr_swp;
	  nlocal = local_ptr->size();
	}
      }
      #pragma omp barrier

      
      //
      //  Phase E: move particles
      //
      {
	TIME("move");
	#pragma omp for
	for( int i = 0; i < nlocal; i++ )
	  move( (*local_ptr)[i], grid_size );
      }
      #pragma omp barrier
      //
      //  save current step if necessary (slightly different semantics than in other codes)
      //
      /* Master gathers every rank's particles to rank 0, sorts them (so
         output order is deterministic) and appends to the save file. */
      #pragma omp master
      if(bsave && (step%SAVEFREQ) == 0 ){
	TIME("save");
	MPI_Gather( &nlocal, 1, MPI_INT, partition_sizes, 1, MPI_INT, 0, MPI_COMM_WORLD);
	/* NOTE(review): runtime-sized stack arrays below (send_buff,
	   all_particles) are a compiler extension and can overflow the
	   stack for large n -- consider std::vector if this path matters. */
	particle_t send_buff[nlocal];
	for (int i = 0; i < nlocal; i++)
	  send_buff[i] = (*local_ptr)[i];
	if (rank == 0){
	  partition_offsets[0] = 0;
	  for (int i = 1; i < n_proc; i++){
	    partition_offsets[i] = partition_offsets[i-1] + partition_sizes[i-1];
	  }
	  particle_t all_particles[partition_offsets[n_proc-1] 
				   + partition_sizes[n_proc-1]];
          MPI_Gatherv( send_buff, nlocal, PARTICLE, all_particles, 
		       partition_sizes, partition_offsets, PARTICLE, 0, MPI_COMM_WORLD );
	  std::sort(all_particles, all_particles 
			      + partition_offsets[n_proc-1] + partition_sizes[n_proc-1],
			      comp_part);
	  save( fsave, partition_offsets[n_proc-1] + partition_sizes[n_proc-1], 
		all_particles, grid_size );
	} else {
          MPI_Gatherv( send_buff, nlocal, PARTICLE, NULL, NULL, 
		       NULL, PARTICLE, 0, MPI_COMM_WORLD );
	}
      }
      #pragma omp barrier
      
      //
      //  clean out the damn bins   ... 
      //
      #pragma omp for
      for( int i = 0; i < bins_dim_x*bins_dim_y; i++ )
      {
	bins[i]->clear();
      }
      
    }
    }
    }
    simulation_time = read_timer( ) - simulation_time;
    
    if( rank == 0 )
        printf( "n = %d, n_procs = %d, simulation time = %g s\n", n, n_proc, simulation_time );
    
    //
    //  release resources (deliberately left to the OS at exit)
    //
    //free( partition_offsets );
    //free( partition_sizes );
    // free( local );
    //free( particles );
    if( fsave )
        fclose( fsave );
    
    MPI_Finalize( );
    
    return 0;
}
