#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <assert.h>
#include <mpi.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_sort_long.h>
#include <gsl/gsl_sort.h>

#include "halofinder.h"

static long currNumParts;

/* Read every particle of the snapshot using round-robin parallel I/O and
   scatter each one to the MPI task whose spatial domain contains it.

   The snapshot is divided into "pages" (chunks of a single file or separate
   files).  Given the number of tasks allowed to do I/O at the same time
   (haloFinderData.NumTasksIOInParallel), tasks 0..NumTasksIOInParallel-1
   each read one page per I/O round, and the particles of that page are
   redistributed to all tasks in a pairwise exchange
   (distributepartsfromsnapshot).  NumPages/NumTasksIOInParallel (+1) rounds
   are needed so that all pages are read.

   A page may itself be returned in chunks by successive calls to read_Page;
   the flags PageDone/AllPagesDone (combined across tasks with an
   MPI_Allreduce logical AND) keep every task in the exchange loop until all
   readers have delivered the last chunk of their page.

   Side effects: allocates the global particle arrays via reallocparts(),
   fills them via distributepartsfromsnapshot(), and sets NumPartsInDomain
   and the file-scope counter currNumParts. */
void readpartsfromsnapshot(void)
{
  long NumPages;
  long NumBuffParts,NumBuffPartsPrev,NumReadParts;
  float *buffPx,*buffPy,*buffPz,*buffVx,*buffVy,*buffVz;
  int partDestTask;
  long i,j,*rank,*tmpLongLoc,keepind;
  float dx,dy,dz;
  int *sendcounts,*displs,coords[3];
  size_t *sindex,*tmpSizetLoc;
  float xxSource[6],xxSave[6];
  long dest,rankSource,rankSave;
  double extravolume;
  long NumPartsToAlloc;
  long NumIORounds,currIORound,pageToRead;
  /* BUG FIX: xx must start out NULL - tasks that read no page in a given
     round previously passed indeterminate (or, in later rounds, dangling)
     pointers to distributepartsfromsnapshot; harmless only by luck since
     all counts are zero, but undefined behavior nonetheless */
  float *xx[6] = {NULL,NULL,NULL,NULL,NULL,NULL};
  int PageDone,AllPagesDone;
  
#ifdef DSAMPLEFACTOR
  double *randnum,*tmpDoubleLoc;
  size_t *randind;
#endif

  /* I/O backend is selected at run time through these function pointers */
  long (*read_NumPages)(void);
  int (*read_Page)(long, long*, float**, float**, float**, float**, float**, float**);
  
  //get # of parts to alloc - include buffer regions if needed
  NumPartsToAlloc = (long) (((double) (haloFinderData.TotNumParts))/((double) (NTasks)));
  if(NumDomains[0] > 1)
    {
      /* volume of the buffer shell around this task's domain */
      extravolume 
	= (haloFinderData.BoxLengthSnapshot/NumDomains[0]+haloFinderData.DomainBuffSize*2.0)
	*(haloFinderData.BoxLengthSnapshot/NumDomains[1]+haloFinderData.DomainBuffSize*2.0)
	*(haloFinderData.BoxLengthSnapshot/NumDomains[2]+haloFinderData.DomainBuffSize*2.0) 
	- (haloFinderData.BoxLengthSnapshot/NumDomains[0])
	*(haloFinderData.BoxLengthSnapshot/NumDomains[1])
	*(haloFinderData.BoxLengthSnapshot/NumDomains[2]);
      NumPartsToAlloc = (long) (((double) NumPartsToAlloc)*EXTRA_PART_ALLOC_FACTOR);
      /* assume the buffer shell holds the mean particle density of the box */
      NumPartsToAlloc += (long) (((double) (haloFinderData.TotNumParts))/pow(haloFinderData.BoxLengthSnapshot,3.0)*extravolume*EXTRA_PART_ALLOC_FACTOR);
    }
  else
    extravolume = 0.0;
  
#ifdef SMALLMEMTEST 
  NumPartsToAlloc *= 0.05;
#endif
  
#ifdef DEBUG
  if(ThisTask == 0 && DEBUG_LEVEL > 0)
    {
      fprintf(stderr,"domain volume = %le, extra volume = %e, NumPartsToAlloc = %ld, NumExtraParts = %ld\n",
	      (haloFinderData.BoxLengthSnapshot/NumDomains[0]+haloFinderData.DomainBuffSize*2.0)
	      *(haloFinderData.BoxLengthSnapshot/NumDomains[1]+haloFinderData.DomainBuffSize*2.0)
	      *(haloFinderData.BoxLengthSnapshot/NumDomains[2]+haloFinderData.DomainBuffSize*2.0),
	      extravolume,
	      NumPartsToAlloc,
	      (long) (((double) (haloFinderData.TotNumParts))/pow(haloFinderData.BoxLengthSnapshot,3.0)*extravolume*1.01)
	      );
    }
#endif
  
  //init vars for reading
  NumPages = 0;
  currNumParts = 0;
  reallocparts(NumPartsToAlloc); // allocate particles in memory
  rank = NULL;
  sindex = NULL;
  NumBuffPartsPrev = 0;
#ifdef DSAMPLEFACTOR
  randnum = NULL;
  randind = NULL;
#endif
  
  //set function pointers for proper simulation type  
#ifdef DEBUG
  if(ThisTask == 0)
    fprintf(stderr,"reading snapshot from simulation type '%s'\n",haloFinderData.SimulationType);
#endif
  if(strcmp(haloFinderData.SimulationType,"ART") == 0)
    {
      read_NumPages = &read_ART_NumPages;
      read_Page = &read_ART_Page;
    }
  else if(strcmp(haloFinderData.SimulationType,"GADGET") == 0)
    {
      read_NumPages = &read_GADGET_NumPages;
      read_Page = &read_GADGET_Page;
    }
  else if(strcmp(haloFinderData.SimulationType,"GADGET-SINGLEFILE") == 0)
    {
      read_NumPages = &read_GADGETsinglefile_NumPages;
      read_Page = &read_GADGETsinglefile_Page;
    }
  else
    {
      if(ThisTask == 0)
	fprintf(stderr,"could not find I/O code for simulation type '%s'!\n",haloFinderData.SimulationType);
      MPI_Abort(MPI_COMM_WORLD,123);
    }
  
  //read total # of pages here - only task 0 touches the file system
  if(ThisTask == 0)
    {
      NumPages = read_NumPages();
#ifdef DEBUG
      fprintf(stderr,"NumPages = %ld\n",NumPages);
#endif
    }
  MPI_Bcast(&NumPages,1,MPI_LONG,0,MPI_COMM_WORLD); 
  
  // setup to recv and send stuff
  sendcounts = (int*)malloc(sizeof(int)*NTasks);
  assert(sendcounts != NULL);
  
  displs = (int*)malloc(sizeof(int)*NTasks);
  assert(displs != NULL);
  
  /* per-axis domain cell size, used to map positions to domain coords */
  dx = (float) (haloFinderData.BoxLengthSnapshot/NumDomains[0]);
  dy = (float) (haloFinderData.BoxLengthSnapshot/NumDomains[1]);
  dz = (float) (haloFinderData.BoxLengthSnapshot/NumDomains[2]);
  
  NumIORounds = NumPages/haloFinderData.NumTasksIOInParallel;
  if(haloFinderData.NumTasksIOInParallel*NumIORounds != NumPages)
    NumIORounds += 1;
  
#ifdef DEBUG
  if(ThisTask == 0)
    fprintf(stderr,"dx,dy,dz = %f|%f|%f\n",dx,dy,dz);
#endif
  
  /* main parallel read loop - see the function header comment for the
     page/round/chunk scheme */
  for(currIORound=0;currIORound<NumIORounds;++currIORound)
    {
#ifdef DEBUG
      if(ThisTask == 0)
	fprintf(stderr,"IO round = %ld of %ld\n",currIORound+1,NumIORounds);
#endif
      
      //init vars 
      PageDone = 0;
      AllPagesDone = 0;
      
      while(!AllPagesDone)
	{
	  NumReadParts = 0; /* # of particles actually returned by read_Page this chunk */
	  
	  if(ThisTask < haloFinderData.NumTasksIOInParallel)
	    {
	      pageToRead = ThisTask + currIORound*haloFinderData.NumTasksIOInParallel;
	      
#ifdef SMALLMEMTEST 
	      if(pageToRead >= 2)
		pageToRead = -1;
#else
	      if(pageToRead >= NumPages)
		pageToRead = -1;
#endif
#ifdef DEBUG
#if DEBUG_LEVEL > 1
	      fprintf(stderr,"%d: IO round = %ld, reading page %ld\n",ThisTask,currIORound,pageToRead);
#endif
#endif
	      if(pageToRead >= 0)
		{
		  /* read_Page allocates the six buffers and returns nonzero
		     when the last chunk of the page has been delivered */
		  PageDone = read_Page(pageToRead,&NumBuffParts,&buffPx,&buffPy,&buffPz,&buffVx,&buffVy,&buffVz);
		  NumReadParts = NumBuffParts;
		  xx[0] = buffPx;
		  xx[1] = buffPy;
		  xx[2] = buffPz;
		  xx[3] = buffVx;
		  xx[4] = buffVy;
		  xx[5] = buffVz;
		  
#ifdef DEBUG
		  if(DEBUG_LEVEL > 1)
		    fprintf(stderr,"%d: NumBuffParts = %ld\n",ThisTask,NumBuffParts);
#endif  
		  
		  /* (re)size the per-particle work arrays to the chunk size */
		  if(NumBuffPartsPrev != NumBuffParts)
		    {
		      tmpLongLoc = (long*)realloc(rank,sizeof(long)*NumBuffParts);
		      assert(tmpLongLoc != NULL);
		      rank = tmpLongLoc;
		      
#ifdef DSAMPLEFACTOR
		      tmpDoubleLoc = (double*)realloc(randnum,sizeof(double)*NumBuffParts);
		      assert(tmpDoubleLoc != NULL);
		      randnum = tmpDoubleLoc;
		      
		      tmpSizetLoc = (size_t*)realloc(randind,sizeof(size_t)*NumBuffParts);
		      assert(tmpSizetLoc != NULL);
		      randind = tmpSizetLoc;
#endif
		    }
		  
#ifdef LIGHTCONE
#ifdef SPHERE
		  /* shift the sphere's origin-centered coords into the box */
		  for(i=0;i<NumBuffParts;++i)
		    {
		      buffPx[i] += haloFinderData.BoxLengthSnapshot/2.0;
		      buffPy[i] += haloFinderData.BoxLengthSnapshot/2.0;
		      buffPz[i] += haloFinderData.BoxLengthSnapshot/2.0;
		    }
#endif
#endif
		  
#ifdef DSAMPLEFACTOR
		  /* draw a random permutation of the chunk */
		  for(i=0;i<NumBuffParts;++i)
		    randnum[i] = drand48();
		  gsl_sort_index(randind,randnum,(size_t) 1,(size_t) NumBuffParts);
		  /* BUG FIX: the old code compacted kept particles in
		     permutation order (buffPx[keepind] = buffPx[randind[i]]),
		     which can read a slot already overwritten by an earlier
		     kept particle, duplicating some particles and losing
		     others.  Instead invert the permutation (rank[p] = random
		     rank of particle p) and, below, keep the particles whose
		     rank falls under the quota while compacting in ascending
		     index order - this never clobbers an unread slot and
		     selects exactly the same random subset. */
		  for(i=0;i<NumBuffParts;++i)
		    rank[randind[i]] = i;
#endif
		  
		  keepind = 0;
		  for(i=0;i<NumBuffParts;++i)
		    {
#ifdef DSAMPLEFACTOR
		      /* keep ~1/DSAMPLEFACTOR of the particles */
		      if(rank[i] >= NumBuffParts/DSAMPLEFACTOR)
			continue;
#endif
		      coords[0] = (int) (buffPx[i]/dx);
		      coords[1] = (int) (buffPy[i]/dy);
		      coords[2] = (int) (buffPz[i]/dz);
		      
		      for(j=0;j<3;++j)
			{
#ifndef LIGHTCONE
			  /* periodic wrap into the box */
			  while(coords[j] < 0)
			    coords[j] += NumDomains[j];
			  while(coords[j] >= NumDomains[j])
			    coords[j] -= NumDomains[j];
#else
			  /* light cones are not periodic - clamp instead */
			  if(coords[j] < 0)
			    coords[j] = 0;
			  if(coords[j] >= NumDomains[j])
			    coords[j] = NumDomains[j]-1;
#endif
			}
		      
		      /* destination task + compaction; the write index keepind
			 is always <= i, so no unread slot is clobbered */
		      MPI_Cart_rank(cartComm,coords,&partDestTask);
		      rank[keepind] = (long) (partDestTask);
		      buffPx[keepind] = buffPx[i];
		      buffPy[keepind] = buffPy[i];
		      buffPz[keepind] = buffPz[i];
		      buffVx[keepind] = buffVx[i];
		      buffVy[keepind] = buffVy[i];
		      buffVz[keepind] = buffVz[i];
		      ++keepind;
		    }
		  NumBuffParts = keepind;
		  
		  if(NumBuffPartsPrev != NumBuffParts)
		    {
		      tmpSizetLoc = (size_t*)realloc(sindex,sizeof(size_t)*NumBuffParts);
		      assert(tmpSizetLoc != NULL);
		      sindex = tmpSizetLoc;
		    }
		  gsl_sort_long_index(sindex,rank,(size_t) 1,(size_t) NumBuffParts);
		  
#ifdef DEBUG
		  if(DEBUG_LEVEL > 1)
		    fprintf(stderr,"%d: sorted parts by task\n",ThisTask);
#endif
		  
		  //now loop through partDestTask counting # of parts per Task in sendcounts
		  for(j=0;j<NTasks;++j)
		    sendcounts[j] = 0;
		  for(i=0;i<NumBuffParts;++i)
		    sendcounts[rank[i]] += 1;
		  
		  /* rank[i] becomes the sorted position of particle i */
		  for(i=0;i<NumBuffParts;++i)
		    rank[sindex[i]] = i;
		  
		  for(i=0;i<NumBuffParts;++i) /* reorder with an in-place algorithm - see Gadget-2 for details - destroys rank */
		    {
		      if(i != rank[i])
			{
			  xxSource[0] = buffPx[i];
			  xxSource[1] = buffPy[i];
			  xxSource[2] = buffPz[i];
			  xxSource[3] = buffVx[i];
			  xxSource[4] = buffVy[i];
			  xxSource[5] = buffVz[i];
			  rankSource = rank[i];
			  dest = rank[i];
			  
			  /* follow the permutation cycle starting at i */
			  do
			    {
			      xxSave[0] = buffPx[dest];
			      xxSave[1] = buffPy[dest];
			      xxSave[2] = buffPz[dest];
			      xxSave[3] = buffVx[dest];
			      xxSave[4] = buffVy[dest];
			      xxSave[5] = buffVz[dest];
			      rankSave = rank[dest];
			      
			      buffPx[dest] = xxSource[0];
			      buffPy[dest] = xxSource[1];
			      buffPz[dest] = xxSource[2];
			      buffVx[dest] = xxSource[3];
			      buffVy[dest] = xxSource[4];
			      buffVz[dest] = xxSource[5];
			      rank[dest] = rankSource;
			      
			      if(dest == i)
				break;
			      
			      xxSource[0] = xxSave[0];
			      xxSource[1] = xxSave[1];
			      xxSource[2] = xxSave[2];
			      xxSource[3] = xxSave[3];
			      xxSource[4] = xxSave[4];
			      xxSource[5] = xxSave[5];
			      rankSource = rankSave;
			      
			      dest = rankSource;
			    }
			  while(1);
			}
		    }
	  	  
		  //fill in displs (prefix sums of sendcounts)
		  displs[0] = 0;
		  for(j=1;j<NTasks;++j)
		    displs[j] = sendcounts[j-1] + displs[j-1];
		  
#ifdef DEBUG
		  if(DEBUG_LEVEL > 1)
		    for(j=0;j<NTasks;++j)
		      fprintf(stderr,"%d: send to %ld displs,sendcounts = %d|%d\n",ThisTask,j,displs[j],sendcounts[j]);
#endif
		  
		}
	      else
		{
		  /* no page left for this task this round - participate in
		     the exchange with empty counts */
		  NumBuffParts = 0;
		  for(j=0;j<NTasks;++j)
		    {
		      displs[j] = 0;
		      sendcounts[j] = 0;
		    }
		  PageDone = 1;
		}
	      
	      NumBuffPartsPrev = NumBuffParts;
	    }
	  else
	    {
	      /* non-reader task - participate in the exchange with empty counts */
	      NumBuffParts = 0;
	      for(j=0;j<NTasks;++j)
		{
		  displs[j] = 0;
		  sendcounts[j] = 0;
		}
	      PageDone = 1;
	      
	      NumBuffPartsPrev = NumBuffParts;
	    }
	  
	  //now distribute particles in rank, buffPx, buffPy, ... vectors
	  distributepartsfromsnapshot(xx,sendcounts,displs);
	  
	  /* BUG FIX: free based on the number of particles actually read, not
	     the post-downsampling count - with DSAMPLEFACTOR a chunk whose
	     kept count dropped to zero used to leak its buffers */
	  if(NumReadParts > 0)
	    {
	      free(buffPx);
	      free(buffPy);
	      free(buffPz);
	      free(buffVx);
	      free(buffVy);
	      free(buffVz);
	      for(j=0;j<6;++j)
		xx[j] = NULL; /* do not let dangling pointers escape to later chunks */
#ifdef DEBUG
	      if(DEBUG_LEVEL > 1)
		fprintf(stderr,"%d: freed buffParts\n",ThisTask);
#endif
	    }
	 
	  //do MPI_Allreduce for PageDone flag - loop until every reader is done
	  MPI_Allreduce(&PageDone,&AllPagesDone,1,MPI_INT,MPI_LAND,MPI_COMM_WORLD);
	}
    }
  
  /* with a single domain there are no buffer regions to exchange later, so
     trim the particle arrays down to exactly what was received (negative
     delta shrinks) */
  if(NumDomains[0] == 1)
    if(currNumParts < NumParts)
      reallocparts(currNumParts-NumParts);
  
  NumPartsInDomain = currNumParts;
  
#ifdef DEBUG
  fprintf(stderr,"%d: curr # of parts = %ld, tot # parts alloc = %ld\n",ThisTask,currNumParts,NumParts);
#endif

#ifdef DEBUG  
  if(ThisTask == 0)
    fprintf(stderr,"done reading snapshot\n");
#endif
  
  /* free(NULL) is a no-op, so no guards needed */
  free(rank);
  free(sindex);
#ifdef DSAMPLEFACTOR
  free(randnum);
  free(randind);
#endif
  free(sendcounts);
  free(displs);
}

/* Scatter one chunk of freshly-read particles to their owning tasks.

   xx[0..5]   - source buffers (Px,Py,Pz,Vx,Vy,Vz), sorted by destination task
   sendcounts - # of particles destined for each task
   displs     - offset of each task's particles within xx[]

   Received particles are appended to the global Parts* arrays at
   currNumParts, which is advanced accordingly.  Aborts if the arrays were
   not allocated large enough up front. */
void distributepartsfromsnapshot(float *xx[6], int *sendcounts, int *displs)
{
  int log2NTasks = 0;
  int level,partner,c;
  long Nsend,Nrecv,k;
  MPI_Status Stat;
  float *dst[6];

  /* destination component arrays, in the same order as xx[] */
  dst[0] = PartsPx;
  dst[1] = PartsPy;
  dst[2] = PartsPz;
  dst[3] = PartsVx;
  dst[4] = PartsVy;
  dst[5] = PartsVz;

  while(NTasks > (1 << log2NTasks))
    ++log2NTasks;

  /*algorithm to loop through pairs of tasks linearly
    -lifted from Gadget-2 under GPL (http://www.gnu.org/copyleft/gpl.html)
    -see pm_periodic.c from Gadget-2 at http://www.mpa-garching.mpg.de/gadget/
  */
  for(level = 0; level < (1 << log2NTasks); level++) /* note: for level=0, target is the same task */
    {
#ifdef DEBUG
      if(ThisTask == 0 && DEBUG_LEVEL > 1)
	fprintf(stderr,"level = %d of %d\n",level,(1 << log2NTasks));
#endif
      partner = ThisTask ^ level;
      if(partner >= NTasks)
	continue;

      /* exchange counts first */
      Nsend = (long) (sendcounts[partner]);
      MPI_Sendrecv(&Nsend,1,MPI_LONG,partner,TAG_NUMDATA_PARTIOEXCHNG,
		   &Nrecv,1,MPI_LONG,partner,TAG_NUMDATA_PARTIOEXCHNG,
		   MPI_COMM_WORLD,&Stat);

      if(Nsend == 0 && Nrecv == 0)
	continue;

      /* make sure we have enough memory for the incoming particles */
      if(Nrecv + currNumParts > NumParts)
	{
	  fprintf(stderr,"%d: did not alloc enough parts on input\n",ThisTask);
	  MPI_Abort(MPI_COMM_WORLD,123);
	}

      if(ThisTask != partner)
	{
	  /* pairwise exchange, one component at a time */
	  for(c=0;c<6;++c)
	    MPI_Sendrecv(xx[c]+displs[partner],(int) Nsend,MPI_FLOAT,partner,TAG_DATA_PARTIOEXCHNG,
			 dst[c]+currNumParts,(int) Nrecv,MPI_FLOAT,partner,TAG_DATA_PARTIOEXCHNG,
			 MPI_COMM_WORLD,&Stat);
	  currNumParts += Nrecv;
	}
      else
	{
	  /* self-exchange: just copy the particles over locally */
	  for(c=0;c<6;++c)
	    for(k=0;k<sendcounts[partner];++k)
	      dst[c][k+currNumParts] = xx[c][k+displs[partner]];
	  currNumParts += sendcounts[partner];
	}
    }
}

void exchangebufferregions(void)
{
  int log2NTasks;
  int level,sendTask,recvTask;
  float *domainCenterX,*domainCenterY,*domainCenterZ;
  float centSepMaxX,centSepMaxY,centSepMaxZ,dx,dy,dz,rhalf;
  float *buffPartsPosVel,*fpTemp;
  long NumBuffParts,*buffPartsInd,*lpTemp,Nsend,Nrecv,i,PartsRecvLoc;
  MPI_Status Stat;
  float fudge;
  float domainCenter[3];
  
#ifdef DEBUG
  if(ThisTask == 0)
    fprintf(stderr,"exchanging particles\n");
#endif
  
  NumBuffParts = 0;
  buffPartsPosVel = NULL;
  buffPartsInd = NULL;
  
  log2NTasks = 0;
  while(NTasks > (1 << log2NTasks))
    ++log2NTasks;
  
  //get domain center
  dx = (float) (haloFinderData.BoxLengthSnapshot/NumDomains[0]);
  dy = (float) (haloFinderData.BoxLengthSnapshot/NumDomains[1]);
  dz = (float) (haloFinderData.BoxLengthSnapshot/NumDomains[2]);
  domainCenter[0] = (float) (domainCoords[0]*dx + dx/2.0);
  domainCenter[1] = (float) (domainCoords[1]*dy + dy/2.0);
  domainCenter[2] = (float) (domainCoords[2]*dz + dz/2.0);
  
  domainCenterX = (float*)malloc(sizeof(float)*NTasks);
  assert(domainCenterX != NULL);
  MPI_Allgather(&(domainCenter[0]),1,MPI_FLOAT,domainCenterX,1,MPI_FLOAT,MPI_COMM_WORLD); 
  domainCenterY = (float*)malloc(sizeof(float)*NTasks);
  assert(domainCenterY != NULL);
  MPI_Allgather(&(domainCenter[1]),1,MPI_FLOAT,domainCenterY,1,MPI_FLOAT,MPI_COMM_WORLD); 
  domainCenterZ = (float*)malloc(sizeof(float)*NTasks);
  assert(domainCenterZ != NULL);
  MPI_Allgather(&(domainCenter[2]),1,MPI_FLOAT,domainCenterZ,1,MPI_FLOAT,MPI_COMM_WORLD); 
  
  fudge = 1.0;
  centSepMaxX = (float) ((haloFinderData.BoxLengthSnapshot/NumDomains[0]/2.0 + haloFinderData.DomainBuffSize)*fudge);
  centSepMaxY = (float) ((haloFinderData.BoxLengthSnapshot/NumDomains[1]/2.0 + haloFinderData.DomainBuffSize)*fudge);
  centSepMaxZ = (float) ((haloFinderData.BoxLengthSnapshot/NumDomains[2]/2.0 + haloFinderData.DomainBuffSize)*fudge);
  
  rhalf = (float) (haloFinderData.BoxLengthSnapshot/2.0);
  
  /*algorithm to loop through pairs of tasks linearly
    -lifted from Gadget-2 under GPL (http://www.gnu.org/copyleft/gpl.html)
    -see pm_periodic.c from Gadget-2 at http://www.mpa-garching.mpg.de/gadget/
  */
  for(level = 0; level < (1 << log2NTasks); level++) /* note: for level=0, target is the same task */
    {
#ifdef DEBUG
      if(ThisTask == 0 && DEBUG_LEVEL > 0)
	fprintf(stderr,"level = %d of %d\n",level,(1 << log2NTasks));
#endif
      sendTask = ThisTask;
      recvTask = ThisTask ^ level;
      if(recvTask < NTasks && sendTask != recvTask)
        {
	  Nsend = 0;
	  for(i=0;i<NumPartsInDomain;++i)
	    {
	      dx = (float) (fabs(PartsPx[i] - domainCenterX[recvTask]));
#ifndef LIGHTCONE
	      if(dx > rhalf)
		dx = (float) (haloFinderData.BoxLengthSnapshot - dx);
#endif
	      dy = (float) (fabs(PartsPy[i] - domainCenterY[recvTask]));
#ifndef LIGHTCONE
	      if(dy > rhalf)
		dy = (float) (haloFinderData.BoxLengthSnapshot - dy);
#endif
	      dz = (float) (fabs(PartsPz[i] - domainCenterZ[recvTask]));
#ifndef LIGHTCONE
	      if(dz > rhalf)
		dz = (float) (haloFinderData.BoxLengthSnapshot - dz);
#endif
	      if(dx < centSepMaxX && dy < centSepMaxY && dz < centSepMaxZ)
		{
		  if(Nsend == NumBuffParts)
		    {
		      lpTemp = (long*)realloc(buffPartsInd,sizeof(long)*(NumBuffParts+10000));
		      assert(lpTemp != NULL);
		      buffPartsInd = lpTemp;
		      
		      fpTemp = (float*)realloc(buffPartsPosVel,sizeof(float)*(NumBuffParts+10000));
		      assert(fpTemp != NULL);
		      buffPartsPosVel = fpTemp;
		      
		      NumBuffParts += 10000;
		    }
		  
		  buffPartsInd[Nsend] = i;
		  ++Nsend;
		}
	    }
	  
	  MPI_Sendrecv(&Nsend,1,MPI_LONG,recvTask,TAG_NUMDATA_BUFFEXCHNG,
		       &Nrecv,1,MPI_LONG,recvTask,TAG_NUMDATA_BUFFEXCHNG,
		       MPI_COMM_WORLD,&Stat);
	  
	  if(Nsend > 0 || Nrecv > 0)
	    {
	      //make room for recv parts
	      PartsRecvLoc = currNumParts;
	      if(Nrecv+currNumParts > NumParts)
		{
		  //reallocparts(Nrecv);
		  fprintf(stderr,"%d: did not alloc enough parts allocated on input: NumParts = %ld, NumPartsInDomain = %ld, Nrecv = %ld, currNumParts = %ld\n",
			  ThisTask,NumParts,NumPartsInDomain,Nrecv,currNumParts);
		  MPI_Abort(MPI_COMM_WORLD,123);
		}
	      
	      //do send recv calls
	      for(i=0;i<Nsend;++i)
		buffPartsPosVel[i] = PartsPx[buffPartsInd[i]];
	      MPI_Sendrecv(buffPartsPosVel,(int) Nsend,MPI_FLOAT,recvTask,TAG_DATA_BUFFEXCHNG,
			   PartsPx+PartsRecvLoc,(int) Nrecv,MPI_FLOAT,recvTask,TAG_DATA_BUFFEXCHNG,
			   MPI_COMM_WORLD,&Stat);
	      
	      for(i=0;i<Nsend;++i)
		buffPartsPosVel[i] = PartsPy[buffPartsInd[i]];
	      MPI_Sendrecv(buffPartsPosVel,(int) Nsend,MPI_FLOAT,recvTask,TAG_DATA_BUFFEXCHNG+1,
			   PartsPy+PartsRecvLoc,(int) Nrecv,MPI_FLOAT,recvTask,TAG_DATA_BUFFEXCHNG+1,
			   MPI_COMM_WORLD,&Stat);
	      
	      for(i=0;i<Nsend;++i)
		buffPartsPosVel[i] = PartsPz[buffPartsInd[i]];
	      MPI_Sendrecv(buffPartsPosVel,(int) Nsend,MPI_FLOAT,recvTask,TAG_DATA_BUFFEXCHNG+2,
			   PartsPz+PartsRecvLoc,(int) Nrecv,MPI_FLOAT,recvTask,TAG_DATA_BUFFEXCHNG+2,
			   MPI_COMM_WORLD,&Stat);

	      for(i=0;i<Nsend;++i)
		buffPartsPosVel[i] = PartsVx[buffPartsInd[i]];
	      MPI_Sendrecv(buffPartsPosVel,(int) Nsend,MPI_FLOAT,recvTask,TAG_DATA_BUFFEXCHNG+3,
			   PartsVx+PartsRecvLoc,(int) Nrecv,MPI_FLOAT,recvTask,TAG_DATA_BUFFEXCHNG+3,
			   MPI_COMM_WORLD,&Stat);
	      
	      for(i=0;i<Nsend;++i)
		buffPartsPosVel[i] = PartsVy[buffPartsInd[i]];
	      MPI_Sendrecv(buffPartsPosVel,(int) Nsend,MPI_FLOAT,recvTask,TAG_DATA_BUFFEXCHNG+4,
			   PartsVy+PartsRecvLoc,(int) Nrecv,MPI_FLOAT,recvTask,TAG_DATA_BUFFEXCHNG+4,
			   MPI_COMM_WORLD,&Stat);
	      
	      for(i=0;i<Nsend;++i)
		buffPartsPosVel[i] = PartsVz[buffPartsInd[i]];
	      MPI_Sendrecv(buffPartsPosVel,(int) Nsend,MPI_FLOAT,recvTask,TAG_DATA_BUFFEXCHNG+5,
			   PartsVz+PartsRecvLoc,(int) Nrecv,MPI_FLOAT,recvTask,TAG_DATA_BUFFEXCHNG+5,
			   MPI_COMM_WORLD,&Stat);
	      
	      currNumParts += Nrecv;
	      
#ifdef DEBUG
	      if(DEBUG_LEVEL > 1)
		fprintf(stderr,"%d: level = %d, currNumParts = %ld (NumParts = %ld), Nsend,Nrecv = %ld|%ld\n",ThisTask,level,currNumParts,NumParts,Nsend,Nrecv);
#endif
	    }
	}
    }
  
#ifdef DEBUG
  if(ThisTask == 0 && DEBUG_LEVEL > 0)
    fprintf(stderr,"done exchanging particles\n");
#endif
  
  if(currNumParts < NumParts)
    reallocparts(currNumParts-NumParts);
#ifdef DEBUG
  if(DEBUG_LEVEL > 1)
    fprintf(stderr,"%d: currNumParts = %ld, NumParts = %ld\n",ThisTask,currNumParts,NumParts);
#endif
	      
  if(NumBuffParts > 0)
    {
      free(buffPartsInd);
      free(buffPartsPosVel);
    }
  free(domainCenterX);
  free(domainCenterY);
  free(domainCenterZ);
}
