#include <mpi.h>
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <vector>
#include <list>
#include <cmath>
#include <cstring>
#include <algorithm>
#include "es-bvs-bl-common.h"

//
//  Altered benchmarking program for O(n/p) algorithm
//


void migrate(std::vector<particle_t>& local, double dx, double dy, 
	     int nx, int ny, int nproc, int rank, MPI_Datatype& PARTICLE)
{
    TIME("migrate");
    std::vector<particle_t> mpiBins[nx*ny];
    for(int i=0; i<nx*ny; i++) mpiBins[i].reserve(100);

    std::vector<int> counters(nx*ny,0);
   
    int nlocal = local.size();
    mpiBins[rank].reserve(nlocal);
    { 
      TIME("bin sort");
      for(int i = 0; i<nlocal; i++)
	{
	  particle_t& p = local[i];
	  int ii = p.x/dx;
	  int jj = p.y/dy;
	  int index=jj*nx + ii;
	  counters[index]++;
	  /*yes, actually pushing particles in, not pointers, need arrays for gather*/
	  mpiBins[index].push_back(p);
	  /* See if this particle is in a ghost region */
	  for (int j = -1; j<= 1; j++){
	    if (p.x + j*cutoff < 0.0 || p.x+j*cutoff >= dx*nx)
	      continue;
	    for (int k = -1; k<= 1; k++){
	      if (p.y + k*cutoff < 0.0 || p.y+k*cutoff >= dy*ny)
		continue;
	      int iii = (p.x+j*cutoff)/dx;
	      int jjj = (p.y+k*cutoff)/dy;
	      if (iii < 0 || iii >= nx)
		continue;
	      if (jjj < 0 || jjj >= ny)
		continue;
	      int index_ghost = jjj*nx + iii;
	      /* Make sure we didnt already add to this bin */
	      if (index_ghost != index && index_ghost >= 0 && index_ghost < nx*ny){
		if (mpiBins[index_ghost].empty() ||
		    mpiBins[index_ghost].back().globalID != p.globalID) 
		  {
		    counters[index_ghost]++;
		    mpiBins[index_ghost].push_back(p);
		  }
	      }
	    }
	  }
	}
    }
    std::vector<int> allCounters(nx*ny*nproc);

    //printf("rank= %d, nlocal=%d, nx=%d, ny=%d, nproc=%d\n",rank, nlocal, nx, ny, nproc);
    //MPI_Barrier(MPI_COMM_WORLD);
    // if(rank==1)
    //  for(int i=0; i<nx*ny; i++) printf("%d ",counters[i]);
    {
      TIME("Barrier before allgather");
      MPI_Barrier(MPI_COMM_WORLD);
    }
    {
      TIME("allgather");
      MPI_Allgather(&(counters[0]), nx*ny, MPI_INT, &(allCounters[0]), 
		    nx*ny, MPI_INT, MPI_COMM_WORLD);
    }
      
    // OK, we have the global histograms for all processors, on all processors.  
    // after 1000 processors this won't scale well, but a random
    // intial global distribution is also looking hard at that scale.
    std::vector<int> offsets(1,0);
    {
      TIME("build counters");
      nlocal = 0;

      for(int i=0; i<nproc; i++)
	{
	  int index = rank + i*nx*ny;
	  nlocal+= allCounters[index];
	offsets.push_back(offsets.back()+allCounters[index]);
	
	}
#ifdef DEBUG
      if(rank==0)
	{
	  printf("nlocal = %d   ", nlocal);
	  for(int i=0; i<offsets.size(); i++)
	    printf("%d ",offsets[i]);
	  fflush(stdout);
	}
#endif
    }
    local.resize(nlocal);
    std::list<MPI_Request> Requests;
    std::vector<MPI_Status>  Status;
    {
      TIME("Isend+Irecv");

      
      //we can re-order this loop to get better send-recv matching later...
      // as written this will have a lot of material piling into the unexpected buffers.
      int totalSend=0, totalRecv=0;
      for(int n=rank; n<nproc+rank; n++)
	{
	  int other = n%nproc;

	  int numSend = counters[other];
	  if(numSend > 0)
	    {
	      if(rank!= other)
		{
		  //MPI_Request sendR;
		  MPI_Request R;
		  Requests.push_back(R);
		  MPI_Isend(&(mpiBins[other][0]), numSend, PARTICLE, other,
			    0, MPI_COMM_WORLD, &(Requests.back()));
		  totalSend += numSend;
		}
	      else
		{
		  int start = offsets[other];
		  int end = offsets[other+1];
		  int num = end-start;
		  memcpy(&(local[start]), &(mpiBins[other][0]), num*sizeof(particle_t));
		  //  int j=0;  
// 		  for(int i=start; i<end; i++, j++)
// 		    {
// 		      local[i] = mpiBins[other][j];	 
// 		    }
		}
	    }
	  int numRecv = offsets[other+1]-offsets[other];
	  if(numRecv > 0 && rank!= other)
	    {
	      MPI_Request R;
	      Requests.push_back(R);
	      MPI_Irecv(&(local[offsets[other]]), numRecv, PARTICLE, 
			other, 0, MPI_COMM_WORLD, &(Requests.back()));
	      totalRecv+= numRecv;
	    }
	}
#ifdef DEBUG
    printf("rank=%d total send =%d, total recv=%d\n", rank, totalSend, totalRecv);
#endif
    }
    //MPI_Barrier(MPI_COMM_WORLD);
    {
      TIME("Waitall");
      std::vector<MPI_Request> r(Requests.size());
      int i=0;
      for(std::list<MPI_Request>::iterator 
	    it=Requests.begin(); 
	  it!= Requests.end(); 
	  ++it, i++) 
	r[i] = *it;
      
      MPI_Status stats[r.size()];
      int result = MPI_Waitall(r.size(), &(r[0]), &(stats[0]));
      if(result != MPI_SUCCESS)
	{
	  printf("Failture on migrate::Waitall  rank=%d \n", rank);
	  MPI_Abort(MPI_COMM_WORLD, result);
	}
    }
}
int main( int argc, char **argv )
{    
    //
    //  process command line parameters
    //
    if( find_option( argc, argv, "-h" ) >= 0 )
    {
        printf( "Options:\n" );
        printf( "-h to see this help\n" );
        printf( "-n <int> to set the number of particles\n" );
        printf( "-o <filename> to specify the output file name\n" );
        return 0;
    }
    
    int n = read_int( argc, argv, "-n", 1000 );
    char *savename = read_string( argc, argv, "-o", NULL );
    
    //
    //  set up MPI
    //
    int n_proc, rank;
    MPI_Init( &argc, &argv );
    MPI_Comm_size( MPI_COMM_WORLD, &n_proc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
#ifdef  TAU
    TAU_PROFILE("main", "int (int, char **)", TAU_DEFAULT);
    TAU_PROFILE_INIT(argc, argv);
    TAU_PROFILE_SET_NODE(rank);
    TAU_PROFILE_SET_CONTEXT(0);
#endif
 
    // Per-step diagnostics, dumped to per-rank files at the end of the run.
    std::list<int> numLocalHistory;     // local particle count each step
    std::list<double> applyForce;       // apply_force loop wall time each step
    //
    //  Allocate generic resources
    //
    bool bsave = savename;
    FILE *fsave = savename && rank == 0 ? fopen( savename, "w" ) : NULL;
    particle_t *particles = (particle_t*) malloc( n * sizeof(particle_t) );
    
    // A particle is exchanged as 7 contiguous doubles (particle_t fields,
    // including globalID -- the commented-out 6 predates globalID).
    MPI_Datatype PARTICLE;
    //MPI_Type_contiguous( 6, MPI_DOUBLE, &PARTICLE );
    MPI_Type_contiguous(7, MPI_DOUBLE, &PARTICLE);
    MPI_Type_commit( &PARTICLE );
    
    //
    //  set up the data partitioning across processors
    //
    int particle_per_proc = (n + n_proc - 1) / n_proc;
    int *partition_offsets = (int*) malloc( (n_proc+1) * sizeof(int) );
    for( int i = 0; i < n_proc+1; i++ )
        partition_offsets[i] = min( i * particle_per_proc, n );
    
    int *partition_sizes = (int*) malloc( n_proc * sizeof(int) );
    for( int i = 0; i < n_proc; i++ )
        partition_sizes[i] = partition_offsets[i+1] - partition_offsets[i];
    
    //
    //  allocate storage for local partition
    //
    int nlocal = partition_sizes[rank];
    std::vector<particle_t> local(nlocal);
    //
    //  initialize and distribute the particles (that's fine to leave it unoptimized)
    //
    double grid_size;
    set_size( n, grid_size );
    if( rank == 0 )
        init_particles( n, grid_size, particles );
    MPI_Scatterv( particles, partition_sizes, partition_offsets, 
		  PARTICLE, &(local[0]), nlocal, PARTICLE, 0, MPI_COMM_WORLD );
    
    //
    //  simulate a number of time steps
    //
    double simulation_time = read_timer( );
    //
    //  Code up to here is unchanged.  We start our algorithm from this point onwards.
    //
    //  Step one, break whole domain in large boxes for each MPI task.

    const int nx = sqrt((float)n_proc);
    const int ny = n_proc/nx;
    assert(nx*ny==n_proc);   // only processor counts factoring this way are supported

    const int my_y = rank/nx;
    const int my_x = rank%nx;

    //  yeah, this leaves some processors hanging out there at the end, 
    //  we can make fancier integer logic here.
    double dx=grid_size/nx;
    double dy=grid_size/ny;
    
    // Local cell grid for force evaluation, padded so ghost particles just
    // outside our box still land in a valid bin.
    const double cell_size = MIN_CELL_SIZE;
    const int bins_dim_x = (int)((dx+3*cell_size)/cell_size);
    const int bins_dim_y = (int)((dy+3*cell_size)/cell_size);

    const double grid_offset_x = my_x*dx - cell_size;
    const double grid_offset_y = my_y*dy - cell_size;

    int numBins = bins_dim_x*bins_dim_y;
    // Bins of non-owning pointers into 'local'.  A vector-of-vectors replaces
    // the original non-standard VLA of heap-allocated vectors, which was
    // never deleted (leak).
    std::vector< std::vector<particle_t*> > bins(numBins);
    for(int i=0; i<numBins; i++)
      bins[i].reserve(50);

    // Flattened-index offsets of the 3x3 bin neighborhood.
    int interact_neighbors[9] = {(-bins_dim_x-1), (-bins_dim_x), (-bins_dim_x+1),
				 -1, 0, 1,
				 (bins_dim_x-1), (bins_dim_x), (bins_dim_x+1)};
    int bin_x, bin_y, bin_num; 
    std::vector< particle_t* > *inter_bin;
    std::vector< particle_t* >::iterator main_part, inter_part;
 
    {
      TIME("main loop");
    for( int step = 0; step < NSTEPS; step++ )
    {
#ifdef DEBUG
      printf("[%d] migrating\n", rank);
#endif
      
      // Exchange ownership + ghosts; afterwards 'local' holds everything this
      // rank needs for force computation.
      migrate(local, dx, dy, nx, ny, n_proc, rank, PARTICLE);
      numLocalHistory.push_back(local.size());      
      
      nlocal = local.size();
#ifdef DEBUG
      printf("binning\n");
#endif
      //
      //  bin particles
      //
      {
      TIME("local binning");
      int numbin =  bins_dim_x*bins_dim_y;
      for( int i = 0; i < nlocal; i++ )
      {
	bin_x = (int)((local[i].x-grid_offset_x)/cell_size);
	bin_y = (int)((local[i].y-grid_offset_y)/cell_size);
	bin_num = bin_y*bins_dim_x+bin_x;
	if(bin_num < 0 || bin_num >=numbin ){
	  printf("[%d] i=%d/%d x = %lf y = %lf failed aiming at bin %d %d\n",
		 rank,i,nlocal,local[i].x,local[i].y,bin_x,bin_y);
	  printf("[%d] I own x:%lf - %lf, y:%lf - %lf  bins_x:%d, bins_y:%d\n",
		 rank, dx*my_x, dx*(my_x+1), dy*my_y, dy*(my_y+1), bins_dim_x, bins_dim_y);
	  assert(false);
	}
	bins[bin_num].push_back(&local[i]);
      }
      }

#ifdef DEBUG
      printf("[%d] computing forces\n",rank);
#endif
      //
      //  compute all forces
      //

      {
	TIME("apply_force loop");
	double t = read_timer();
	for( int i = 0; i < bins_dim_x*bins_dim_y; i++ )
	  {
	    for ( main_part=bins[i].begin(); 
		  main_part<bins[i].end(); 
		  main_part++ )
	      {
		(*main_part)->ax = 0;
		(*main_part)->ay = 0;
		for ( int bin_to = 0; bin_to < 9; bin_to++ )
		  {
		    // Edgar - if bin is on edge this will attempt redundant bin interactions
		    if (i + interact_neighbors[bin_to] < 0 ||
			i + interact_neighbors[bin_to] >= bins_dim_x*bins_dim_y )
		      continue;
		    inter_bin = &bins[i + interact_neighbors[bin_to]];
		    for ( inter_part=inter_bin->begin(); 
			  inter_part<inter_bin->end(); 
			  inter_part++ )
		      {
			apply_force( **main_part, **inter_part );
		      }
		  }
	      }
	  }
	t=read_timer()-t;
	applyForce.push_back(t);
      }
  
#ifdef DEBUG
      printf("[%d] pruning particles\n", rank);
#endif

      //
      //  prune ghost particles out: keep only particles inside our own box.
      //  Stable in-place compaction -- same result as the original
      //  erase-in-a-loop, but O(n) instead of O(n^2).
      //
      {
	int keep = 0;
	for( int i = 0; i < nlocal; i++ ){
	  if (!(local[i].x < my_x*dx || local[i].x >= (my_x+1)*dx ||
		local[i].y < my_y*dy || local[i].y >= (my_y+1)*dy)){
	    local[keep++] = local[i];
	  }
	}
	local.resize(keep);
	nlocal = keep;
      }
      
#ifdef DEBUG
      printf("[%d] moving particles\n", rank);
#endif

      //
      //  move particles
      //
      {
	TIME("move");
	for( int i = 0; i < nlocal; i++ )
	  move( local[i], grid_size );
      }
#ifdef DEBUG
      printf("[%d] saving particles\n", rank);
#endif
      //
      //  save current step if necessary (slightly different semantics than in other codes)
      //
      if(bsave && (step%SAVEFREQ) == 0 ){
	TIME("save");
	MPI_Gather( &nlocal, 1, MPI_INT, partition_sizes, 1, MPI_INT, 0, MPI_COMM_WORLD);
	// 'local' is contiguous, so send from it directly; the original
	// copied into a stack VLA, risking overflow for large nlocal.
	particle_t* sendbuf = nlocal > 0 ? &(local[0]) : NULL;
	if (rank == 0){
	  partition_offsets[0] = 0;
	  for (int i = 1; i < n_proc; i++){
	    partition_offsets[i] = partition_offsets[i-1] + partition_sizes[i-1];
	  }
	  int ntotal = partition_offsets[n_proc-1] + partition_sizes[n_proc-1];
	  // Heap receive buffer instead of the original stack VLA.
	  std::vector<particle_t> all_particles(ntotal);
          MPI_Gatherv( sendbuf, nlocal, PARTICLE, 
		       all_particles.empty() ? NULL : &(all_particles[0]), 
		       partition_sizes, partition_offsets, PARTICLE, 0, MPI_COMM_WORLD );
	  // Restore a deterministic global ordering before saving.
	  std::sort(all_particles.begin(), all_particles.end(), comp_part);
	  save( fsave, ntotal, 
		all_particles.empty() ? NULL : &(all_particles[0]), grid_size );
	} else {
          MPI_Gatherv( sendbuf, nlocal, PARTICLE, NULL, NULL, 
		       NULL, PARTICLE, 0, MPI_COMM_WORLD );
	}
      }
      
      //
      //  clean out the bins for the next step (clear() keeps the capacity)
      //
      for( int i = 0; i < bins_dim_x*bins_dim_y; i++ )
	{
	  bins[i].clear();
	  bins[i].reserve(50);   // fixed: original reserved on bins[1] only
	}
      
    }
    }
    simulation_time = read_timer( ) - simulation_time;
    
    if( rank == 0 )
        printf( "n = %d, n_procs = %d, simulation time = %g s\n", n, n_proc, simulation_time );
    
    // Dump per-step diagnostics.  Buffer sized generously: the original 17
    // bytes overflowed on "applyForce%2d.history" (21 bytes incl. NUL).
    char buf[64];
    sprintf(buf,"Node%2d.history", rank);
    FILE* particleHistory = fopen(buf, "w");
    if( particleHistory )
    {
      int k=1;
      for(std::list<int>::iterator it=numLocalHistory.begin(); 
	  it!= numLocalHistory.end(); ++it, ++k)
	fprintf(particleHistory, "%d  %d\n",k,  *it);
      fflush(particleHistory);
      fclose(particleHistory);
    }
    
    sprintf(buf,"applyForce%2d.history", rank);
    FILE* force = fopen(buf, "w");
    if( force )
    {
      int k=1;
      for(std::list<double>::iterator d=applyForce.begin(); 
	  d!= applyForce.end(); ++d, ++k)
	fprintf(force, "%d  %f\n",k,  *d);
      fflush(force);
      fclose(force);
    }
	    
    //
    //  release resources
    //
    MPI_Type_free( &PARTICLE );
    free( partition_offsets );
    free( partition_sizes );
    free( particles );
    if( fsave )
        fclose( fsave );
    
    MPI_Finalize( );
    
    return 0;
}
