#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <mpi.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_sort_float.h>
#include <gsl/gsl_heapsort.h>
#ifdef USEOPENMP
#include <omp.h>
#endif

#include "halofinder.h"

//top-level driver for parallel halo percolation:
//  1) cut halos below the minimum mass
//  2) build local FoF groups of overlapping halos
//  3) exchange group members across tasks so groups are task-local
//  4) percolate each group, keeping only the dominant halos
//uses the globals Halos/NumHalos (modified in place) and haloFinderData
void halopercolation_parallel(void)
{
  long i,haloInd;
  kdTreeData *td;
  float *px,*py,*pz;
  int periodic;
  float minHaloMass;
  Halo *tmpHalos;
  FoFData *fofd;
  int maxNumNbrs,*nbrsInd;
  float *nbrsRad2;
  float maxMass,globalMaxMass;
  long TotNumHalos;
  
  logProfileTag(PROFILETAG_HALOPERC);
  
  MPI_Reduce(&NumHalos,&TotNumHalos,1,MPI_LONG,MPI_SUM,0,MPI_COMM_WORLD);
  
#ifdef DEBUG
  if(ThisTask == 0)
    fprintf(stderr,"%d: doing parallel halo percolation, NumHalos = %ld\n",ThisTask,TotNumHalos);
#endif
  
  //setup - light cone geometry is not periodic, snapshot boxes are
#ifdef LIGHTCONE
  periodic = 0;
#else
  periodic = 1;
#endif
    
  //remove halos below mass limit by compacting the kept halos in place
  minHaloMass = haloFinderData.MinNumPartsHalo;
  haloInd = 0;
  for(i=0;i<NumHalos;++i)
    if(Halos[i].mass >= minHaloMass)
      {
	Halos[haloInd] = Halos[i];
	++haloInd;
      }
  
  if(haloInd < NumHalos)
    {
      NumHalos = haloInd;
      //realloc with size 0 may legally return NULL, which would trip the
      //assert - when everything was cut, just keep the oversized buffer
      if(NumHalos > 0)
	{
	  tmpHalos = (Halo*)realloc(Halos,sizeof(Halo)*NumHalos);
	  assert(tmpHalos != NULL);
	  Halos = tmpHalos;
	}
    }
  
  //get search radius from the globally most massive halo
  //start from zero so we never read Halos[0] when NumHalos == 0
  //(masses are non-negative, so zero is a safe lower bound)
  maxMass = 0.0f;
  for(i=0;i<NumHalos;++i)
    if(Halos[i].mass > maxMass)
      maxMass = Halos[i].mass;
  MPI_Allreduce(&maxMass,&globalMaxMass,1,MPI_FLOAT,MPI_MAX,MPI_COMM_WORLD);
  
  //setup scratch space for kdtree neighbor searches
  maxNumNbrs = 100000;
  nbrsRad2 = (float*)malloc(sizeof(float)*maxNumNbrs);
  assert(nbrsRad2 != NULL);
  nbrsInd = (int*)malloc(sizeof(int)*maxNumNbrs);
  assert(nbrsInd != NULL);
  
  //build kdtree over the halo positions
  //NOTE: malloc(0) may return NULL when NumHalos == 0, which is benign
  px = (float*)malloc(sizeof(float)*NumHalos);
  assert(px != NULL || NumHalos == 0);
  py = (float*)malloc(sizeof(float)*NumHalos);
  assert(py != NULL || NumHalos == 0);
  pz = (float*)malloc(sizeof(float)*NumHalos);
  assert(pz != NULL || NumHalos == 0);
  for(i=0;i<NumHalos;++i)
    {
      px[i] = Halos[i].pos[0];
      py[i] = Halos[i].pos[1];
      pz[i] = Halos[i].pos[2];
    }
  td = buildkdTree(px,py,pz,(int) NumHalos,NULL);
    
  //first build FoF groups of overlapping halos local to this task
  fofd = build_fofhalogroups(td,px,py,pz,periodic,globalMaxMass,&nbrsRad2,&nbrsInd,&maxNumNbrs);
  assert(fofd != NULL);
  
  //now look at neighboring domains and check for other group members - import/export them
  //this may rebuild td/px/py/pz/fofd, hence the pointer-to-pointer args
  join_fofhalogroups(&td,&px,&py,&pz,periodic,globalMaxMass,&nbrsRad2,&nbrsInd,&maxNumNbrs,&fofd);
  
  //free search scratch and tree - percolation below is an n^2 pass per group
  free(nbrsRad2);
  free(nbrsInd);
  destroykdTree(td);
  free(px);
  free(py);
  free(pz);
  
  //now resolve conflicts for each group
  percolate_fofhalogroups(periodic,fofd);
  
  //clean up
  free(fofd);
  
  MPI_Reduce(&NumHalos,&TotNumHalos,1,MPI_LONG,MPI_SUM,0,MPI_COMM_WORLD);
#ifdef DEBUG
  if(ThisTask == 0)
    fprintf(stderr,"%d: done with halo percolation, NumHalos = %ld\n",ThisTask,TotNumHalos);
#endif
  
  logProfileTag(PROFILETAG_HALOPERC);
}

//pairs a halo's index in the Halos array with the value (mass or vcirc)
//used to order percolation within a FoF group
typedef struct {
  long index;
  float val;
} SortHalo;

//qsort comparator: sorts SortHalo entries by val in DESCENDING order
//(explicit comparisons rather than subtraction, which is not safe for floats)
static int compSortHaloVal(const void *a, const void *b)
{
  const SortHalo *sa = (const SortHalo*)a;
  const SortHalo *sb = (const SortHalo*)b;
  
  if(sa->val < sb->val)
    return 1;
  if(sa->val > sb->val)
    return -1;
  return 0;
}

//resolve overlaps within each FoF halo group: halos in a group are visited
//in decreasing mass (or vcirc, depending on PERCOLATE_MASS) order, and any
//still-kept halo within a kept halo's exclusion radius is discarded
//surviving halos are compacted in place and assigned globally unique indexes
//modifies the globals Halos and NumHalos
void percolate_fofhalogroups(int periodic, FoFData *fofd)
{
  long i,next,length,haloInd,j;
  Halo *tmpHalos;
  long NumSortHalosAlloc;
  SortHalo *sortHalos,*tmpSortHalos;
  short int *keep;
  float dx,dy,dz,rad;
  float rhalf;
  long *NumHalosTasks;
  
  rhalf = (float) (haloFinderData.BoxLengthSnapshot/2.0);
  
  //keep[i] == 1 means halo i survives percolation; start by keeping all
  //malloc(0) may legally return NULL when NumHalos == 0, which is benign
  keep = (short int*)malloc(sizeof(short int)*NumHalos);
  assert(keep != NULL || NumHalos == 0);
  for(i=0;i<NumHalos;++i)
    keep[i] = 1;
  
  //now do percolation in each fof halo group - each group is processed
  //exactly once, triggered by its head halo
  sortHalos = NULL;
  NumSortHalosAlloc = 0;
  for(i=0;i<NumHalos;++i)
    {
      if(fofd[i].head == i)
	{

#ifdef DEBUG
#if DEBUG_LEVEL > 1
	  fprintf(stderr,"%d: i = %ld, head = %ld, length = %ld\n",ThisTask,i,fofd[i].head,fofd[i].length);
#endif
#endif
	  //grow the scratch array to hold this group's halos
	  length = fofd[i].length;
	  if(length >= NumSortHalosAlloc)
	    {
	      tmpSortHalos = (SortHalo*)realloc(sortHalos,sizeof(SortHalo)*length);
	      assert(tmpSortHalos != NULL);
	      sortHalos = tmpSortHalos;
	      NumSortHalosAlloc = length;
	    }
	  
	  //walk the group's linked list and stack up its members
	  length = 0;
	  next = i;
	  while(next >= 0)
	    {
	      sortHalos[length].index = next;
#ifdef PERCOLATE_MASS
	      sortHalos[length].val = Halos[next].mass;
#else
	      sortHalos[length].val = Halos[next].vcirc;
#endif	      
	      next = fofd[next].link;
	      ++length;
	    }
	  assert(length == fofd[i].length);
	  
	  //sort by mass or vcirc, largest first
	  qsort(sortHalos,(size_t) length,sizeof(SortHalo),compSortHaloVal);
	  
	  //do percolation
	  for(next=0;next<length;++next)
	    {
	      if(keep[sortHalos[next].index] == 1) //halo is to be kept so far
		{
		  //mark all other halos which overlap and are less massive with keep = 0
		  //uses an n^2 search since halos will be eliminated as percolation goes ahead
		  for(j=0;j<length;++j)
		    {
		      if(j != next && keep[sortHalos[j].index] == 1) //only consider halos which are not themselves and are currently being kept
			{
			  dx = (float) (fabs(Halos[sortHalos[next].index].pos[0] - Halos[sortHalos[j].index].pos[0]));
			  dy = (float) (fabs(Halos[sortHalos[next].index].pos[1] - Halos[sortHalos[j].index].pos[1]));
			  dz = (float) (fabs(Halos[sortHalos[next].index].pos[2] - Halos[sortHalos[j].index].pos[2]));
			  
			  //minimum-image convention for periodic boxes
			  if(periodic)
			    {
			      if(dx > rhalf)
				dx = haloFinderData.BoxLengthSnapshot - dx;

			      if(dy > rhalf)
				dy = haloFinderData.BoxLengthSnapshot - dy;

			      if(dz > rhalf)
				dz = haloFinderData.BoxLengthSnapshot - dz;
			    }
			  
			  rad = (float) (sqrt(dx*dx + dy*dy + dz*dz));
			  if(rad < halo_exlcusion_radius(Halos[sortHalos[next].index].mass*haloFinderData.PartMass,Halos[sortHalos[j].index].mass*haloFinderData.PartMass))
			    keep[sortHalos[j].index] = 0;
			}
		    }
		}
	    }
	}
    }
  
  if(NumSortHalosAlloc > 0)
    free(sortHalos);

  //compact the surviving halos to the front of the array
  haloInd = 0;
  for(i=0;i<NumHalos;++i)
    if(keep[i])
      {
	Halos[haloInd] = Halos[i];
	++haloInd;
      }
  free(keep);
  
  if(haloInd < NumHalos)
    {
      NumHalos = haloInd;
      //realloc with size 0 may legally return NULL, which would trip the
      //assert - when nothing survived, just keep the oversized buffer
      if(NumHalos > 0)
	{
	  tmpHalos = (Halo*)realloc(Halos,sizeof(Halo)*NumHalos);
	  assert(tmpHalos != NULL);
	  Halos = tmpHalos;
	}
    }
  
  //assign globally unique halo indexes: this task's halos start after the
  //total number of halos on all lower-ranked tasks
  NumHalosTasks = (long*)malloc(sizeof(long)*NTasks);
  assert(NumHalosTasks != NULL);
  MPI_Allgather(&NumHalos,1,MPI_LONG,NumHalosTasks,1,MPI_LONG,MPI_COMM_WORLD); 
  haloInd = 0;
  for(i=0;i<ThisTask;++i)
    haloInd += NumHalosTasks[i];
  for(i=0;i<NumHalos;++i)
    Halos[i].index = haloInd + i;
  
  free(NumHalosTasks);
}

//link halo i into the FoF groups of any halos it overlaps with
//fofd implements per-halo linked lists: head = index of the group's first
//halo, tail = index of its last halo, link = next halo in the group (-1 at
//the end), and length (valid on the head entry) = number of group members
//searchRadius must be large enough to reach any halo whose pairwise
//exclusion radius with halo i could overlap (caller uses the global max mass)
static void append_halo_fofhalogroups(kdTreeData *td, float *px, float *py, float *pz, int periodic, float searchRadius,
				      float **nbrsRad2, int **nbrsInd, int *maxNumNbrs, FoFData *fofd, int i)
{
  int j,NumNbrs,nbr,next;
  
  //find all halos within searchRadius of halo i
  NumNbrs = get_nnbrs_kdtree(Halos[i].pos,searchRadius,periodic,haloFinderData.BoxLengthSnapshot,
			     nbrsRad2,nbrsInd,maxNumNbrs,px,py,pz,td);
  
  for(j=0;j<NumNbrs;++j)
    {
      nbr = (*nbrsInd)[j];
      //merge the two groups only when the pair truly overlaps (separation
      //below the pairwise exclusion radius) and is not already in one group
      if(sqrt((*nbrsRad2)[j]) <  halo_exlcusion_radius(Halos[i].mass*haloFinderData.PartMass,Halos[nbr].mass*haloFinderData.PartMass) &&
	 fofd[nbr].head != fofd[i].head) //test to make sure halo is a nbr and that it is not in the same group
	{
	  //splice the shorter list onto the longer one so the head
	  //relabeling pass below touches as few entries as possible
	  if(fofd[fofd[nbr].head].length < fofd[fofd[i].head].length)
	    {
	      //append nbr's group after i's group tail, then fix tail/length
	      fofd[fofd[fofd[i].head].tail].link = fofd[nbr].head;
	      fofd[fofd[i].head].tail = fofd[fofd[nbr].head].tail;
	      fofd[fofd[i].head].length += fofd[fofd[nbr].head].length;
	      
	      //relabel every member of nbr's old group to i's head
	      next = fofd[nbr].head;
	      while(next >=0)
		{
		  fofd[next].head = fofd[i].head;
		  next = fofd[next].link;
		}
	    }
	  else
	    {
	      //append i's group after nbr's group tail, then fix tail/length
	      fofd[fofd[fofd[nbr].head].tail].link = fofd[i].head;
	      fofd[fofd[nbr].head].tail = fofd[fofd[i].head].tail;
	      fofd[fofd[nbr].head].length += fofd[fofd[i].head].length;
	      
	      //relabel every member of i's old group to nbr's head
	      //(this changes fofd[i].head for the next loop iterations,
	      // which keeps the same-group test above consistent)
	      next = fofd[i].head;
	      while(next >=0)
		{
		  fofd[next].head = fofd[nbr].head;
		  next = fofd[next].link;
		}
	    }
	}
    }
}

//exchange halos between tasks so that each FoF group ends up wholly on one
//task: repeatedly (a) send border-halo copies to potentially overlapping
//tasks, (b) let the receiver decide (by group length, with a rank tie-break)
//which side keeps each linked group, and (c) ship the losing group's halos
//over, rebuilding the kdtree/positions/fofd each round; terminates when no
//task moved any halos
//td/px/py/pz/fofd are passed by pointer because they may be reallocated
void join_fofhalogroups(kdTreeData **td, float **px, float **py, float **pz, int periodic, float globalMaxHaloMass,
			float **nbrsRad2, int **nbrsInd, int *maxNumNbrs, FoFData **fofd)
{
  int log2NTasks;
  int level,sendTask,recvTask;
  long Nsend,Nrecv;
  MPI_Status Stat;
  float domainOrigin[3];
  Halo *halosToSend,*tmpHalos,*recvHalos;
  long NumHalosToSendAlloc,NumRecvHalosAlloc;
  float dx,dy,dz;
  float domainCenter[3];
  float *domainCenterX,*domainCenterY,*domainCenterZ;
  float centSepMax[3];
  float *centSepMaxX,*centSepMaxY,*centSepMaxZ;
  float rhalf;
  float hpos[3];
  long GlobalNumExtraHalos,NumExtraHalos,TotNumHalosSent;
  long *haloIndsToExport,*tmpLong;
  long NumHaloIndsToExportAlloc,NumHaloIndsToExport;
  long NumNbrs,i,j,nbr;
  long currNumHalos,slidAHalo,startLoc;
  float *tmpFloat;
  FoFData *tmpFoFData;
  int bigTask;
  float searchRadius;
  
  ////////////////
  //setup       //
  ////////////////
  rhalf = (float) (haloFinderData.BoxLengthSnapshot/2.0);
  getDomainOrigin_shiftpartshalos(domainOrigin);
  
  //compute this task's domain center and gather the centers of all tasks
  dx = (float) (haloFinderData.BoxLengthSnapshot/NumDomains[0]);
  dy = (float) (haloFinderData.BoxLengthSnapshot/NumDomains[1]);
  dz = (float) (haloFinderData.BoxLengthSnapshot/NumDomains[2]);
  domainCenter[0] = (float) (domainCoords[0]*dx + dx/2.0);
  domainCenter[1] = (float) (domainCoords[1]*dy + dy/2.0);
  domainCenter[2] = (float) (domainCoords[2]*dz + dz/2.0);
  domainCenterX = (float*)malloc(sizeof(float)*NTasks);
  assert(domainCenterX != NULL);
  MPI_Allgather(&(domainCenter[0]),1,MPI_FLOAT,domainCenterX,1,MPI_FLOAT,MPI_COMM_WORLD); 
  domainCenterY = (float*)malloc(sizeof(float)*NTasks);
  assert(domainCenterY != NULL);
  MPI_Allgather(&(domainCenter[1]),1,MPI_FLOAT,domainCenterY,1,MPI_FLOAT,MPI_COMM_WORLD); 
  domainCenterZ = (float*)malloc(sizeof(float)*NTasks);
  assert(domainCenterZ != NULL);
  MPI_Allgather(&(domainCenter[2]),1,MPI_FLOAT,domainCenterZ,1,MPI_FLOAT,MPI_COMM_WORLD); 
  
  //per-task halo extent from its own domain center, per dimension;
  //if a halo on another task is closer than this (plus the search radius)
  //to a task's center, it may overlap that task's halos and must be sent
  centSepMaxX = (float*)malloc(sizeof(float)*NTasks);
  assert(centSepMaxX != NULL);
  centSepMaxY = (float*)malloc(sizeof(float)*NTasks);
  assert(centSepMaxY != NULL);
  centSepMaxZ = (float*)malloc(sizeof(float)*NTasks);
  assert(centSepMaxZ != NULL);
  
  //for looping through pairs of tasks
  log2NTasks = 0;
  while(NTasks > (1 << log2NTasks))
    ++log2NTasks;
  
  halosToSend = NULL;
  NumHalosToSendAlloc = 0;
  recvHalos = NULL;
  NumRecvHalosAlloc = 0;
  haloIndsToExport = NULL;
  NumHaloIndsToExportAlloc = 0;
  
  do {
    
    //Halos[i].index == TRUE marks halos that are (still) on this task
    //also compute the max separation of this task's halos from its OWN
    //domain center in each dimension
    //bug fixes vs. original: (1) the original indexed domainCenterX/Y/Z
    //with recvTask, which is uninitialized here (it is only set inside the
    //level loop below) - the intent is this task's own center; (2) the
    //original seeded centSepMax[1] and centSepMax[2] with dx instead of
    //dy/dz; initializing to zero is equivalent to seeding with the i == 0
    //values (seps are non-negative) and also covers NumHalos == 0
    centSepMax[0] = 0;
    centSepMax[1] = 0;
    centSepMax[2] = 0;
    for(i=0;i<NumHalos;++i)
      {
	Halos[i].index = 1;
	
	//shift to global position
	hpos[0] = Halos[i].pos[0];
	hpos[1] = Halos[i].pos[1];
	hpos[2] = Halos[i].pos[2];
	point_domain2global(hpos+0,hpos+1,hpos+2,domainOrigin);

	//get sep from this task's own domain center
	dx = (float) (fabs(domainCenter[0] - hpos[0]));
	dy = (float) (fabs(domainCenter[1] - hpos[1]));
	dz = (float) (fabs(domainCenter[2] - hpos[2]));
	if(periodic)
	  {
	    if(dx > rhalf)
	      dx = haloFinderData.BoxLengthSnapshot - dx;

	    if(dy > rhalf)
	      dy = haloFinderData.BoxLengthSnapshot - dy;

	    if(dz > rhalf)
	      dz = haloFinderData.BoxLengthSnapshot - dz;
	  }
	
	if(dx > centSepMax[0])
	  centSepMax[0] = dx;
	if(dy > centSepMax[1])
	  centSepMax[1] = dy;
	if(dz > centSepMax[2])
	  centSepMax[2] = dz;
      }
    MPI_Allgather(&(centSepMax[0]),1,MPI_FLOAT,centSepMaxX,1,MPI_FLOAT,MPI_COMM_WORLD); 
    MPI_Allgather(&(centSepMax[1]),1,MPI_FLOAT,centSepMaxY,1,MPI_FLOAT,MPI_COMM_WORLD); 
    MPI_Allgather(&(centSepMax[2]),1,MPI_FLOAT,centSepMaxZ,1,MPI_FLOAT,MPI_COMM_WORLD); 
    
    //record how many extra halos we get - when this is zero for all tasks, we are done with exchanges
    NumExtraHalos = 0;
    
    //record how many halos we send elsewhere
    TotNumHalosSent = 0;
    
    bigTask = 1;
    
    //algorithm to loop through pairs of tasks linearly using level and log2NTasks
    //  -lifted from Gadget-2 under GPL (http://www.gnu.org/copyleft/gpl.html)
    //  -see pm_periodic.c from Gadget-2 at http://www.mpa-garching.mpg.de/gadget/
    for(level = 0; level < (1 << log2NTasks); level++) // note: for level=0, target is the same task
      {
#ifdef DEBUG
#if DEBUG_LEVEL > 1
	if(ThisTask == 0)
	  fprintf(stderr,"level = %d of %d\n",level,(1 << log2NTasks));
#endif
#endif
	sendTask = ThisTask;
	recvTask = ThisTask ^ level;
	if(recvTask < NTasks && sendTask != recvTask)
	  {
	    //compute Nsend: copies of our border halos that could overlap
	    //halos on recvTask
	    Nsend = 0;
	    for(i=0;i<NumHalos;++i)
	      {
		if(Halos[i].index) //if true, halo is on this task
		  {
		    //shift to global position
		    hpos[0] = Halos[i].pos[0];
		    hpos[1] = Halos[i].pos[1];
		    hpos[2] = Halos[i].pos[2];
		    point_domain2global(hpos+0,hpos+1,hpos+2,domainOrigin);
		    
		    //test if it needs to be sent over
		    dx = (float) (fabs(domainCenterX[recvTask] - hpos[0]));
		    dy = (float) (fabs(domainCenterY[recvTask] - hpos[1]));
		    dz = (float) (fabs(domainCenterZ[recvTask] - hpos[2]));
		    
		    if(periodic)
		      {
			if(dx > rhalf)
			  dx = haloFinderData.BoxLengthSnapshot - dx;
			
			if(dy > rhalf)
			  dy = haloFinderData.BoxLengthSnapshot - dy;
			
			if(dz > rhalf)
			  dz = haloFinderData.BoxLengthSnapshot - dz;
		      }
		    
		    //use the global max halo mass so no possible overlap is missed
		    searchRadius = halo_exlcusion_radius(Halos[i].mass*haloFinderData.PartMass,globalMaxHaloMass*haloFinderData.PartMass);
		    
		    if(dx <= centSepMaxX[recvTask]+searchRadius || dy <= centSepMaxY[recvTask]+searchRadius || dz <= centSepMaxZ[recvTask]+searchRadius)
		      {
			if(Nsend >= NumHalosToSendAlloc)
			  {
			    tmpHalos = (Halo*)realloc(halosToSend,sizeof(Halo)*(NumHalosToSendAlloc + 1000));
			    assert(tmpHalos != NULL);
			    halosToSend = tmpHalos;
			    NumHalosToSendAlloc += 1000;
			  }
			
			//send a copy with global position; the index field
			//is repurposed to carry the halo's FoF group length
			halosToSend[Nsend] = Halos[i];
			halosToSend[Nsend].pos[0] = hpos[0];
			halosToSend[Nsend].pos[1] = hpos[1];
			halosToSend[Nsend].pos[2] = hpos[2];
			halosToSend[Nsend].index = (*fofd)[(*fofd)[i].head].length;
			
			++Nsend;
		      }
		  }
	      }
	    
	    //get send and recv counts
	    MPI_Sendrecv(&Nsend,1,MPI_LONG,recvTask,TAG_NUMDATA1_FOFEXCHNG,
			 &Nrecv,1,MPI_LONG,recvTask,TAG_NUMDATA1_FOFEXCHNG,
			 MPI_COMM_WORLD,&Stat);
	    
	    //now do send or recv if needed
	    if(Nsend > 0 || Nrecv > 0)
	      {
		//get more space if needed
		if(Nrecv > NumRecvHalosAlloc)
		  {
		    NumRecvHalosAlloc = Nrecv;
		    tmpHalos = (Halo*)realloc(recvHalos,sizeof(Halo)*Nrecv);
		    assert(tmpHalos != NULL);
		    recvHalos = tmpHalos;
		  }
		
		MPI_Sendrecv(halosToSend,(int) Nsend,MPI_Halo,recvTask,TAG_DATA1_FOFEXCHNG,
			     recvHalos,(int) Nrecv,MPI_Halo,recvTask,TAG_DATA1_FOFEXCHNG,
			     MPI_COMM_WORLD,&Stat);
		
		//now test recvHalos to see if anything needs to be sent back
		//send one of ThisTask's halos to other task if 
		//  1) halo from other task links to it 
		//  2) if halo on ThisTask is in a smaller fof group than the other halo's
		//  3) Halo is actually on this task (i.e. Halos[nbr].index is true)
		NumHaloIndsToExport = 0;
		for(i=0;i<Nrecv;++i)
		  {
		    point_global2domain(recvHalos[i].pos+0,recvHalos[i].pos+1,recvHalos[i].pos+2,domainOrigin);
		    
		    searchRadius = halo_exlcusion_radius(recvHalos[i].mass*haloFinderData.PartMass,globalMaxHaloMass*haloFinderData.PartMass);
		    
		    NumNbrs = get_nnbrs_kdtree(recvHalos[i].pos,searchRadius,periodic,haloFinderData.BoxLengthSnapshot,
					       nbrsRad2,nbrsInd,maxNumNbrs,*px,*py,*pz,*td);
		    
		    for(j=0;j<NumNbrs;++j)
		      {
			nbr = (*nbrsInd)[j];
			if(sqrt((*nbrsRad2)[j]) < halo_exlcusion_radius(recvHalos[i].mass*haloFinderData.PartMass,Halos[nbr].mass*haloFinderData.PartMass) &&
			   Halos[nbr].index)
			  {
			    //recvHalos[i].index carries the sender-side
			    //group length; the larger group wins, with rank
			    //(alternating via bigTask) breaking exact ties
			    if(recvHalos[i].index > (*fofd)[(*fofd)[nbr].head].length ||
			       (recvHalos[i].index == (*fofd)[(*fofd)[nbr].head].length && sendTask > recvTask && bigTask) ||
			       (recvHalos[i].index == (*fofd)[(*fofd)[nbr].head].length && sendTask < recvTask && !bigTask))
			      {
				if(NumHaloIndsToExport >= NumHaloIndsToExportAlloc)
				  {
				    tmpLong = (long*)realloc(haloIndsToExport,sizeof(long)*(NumHaloIndsToExportAlloc + 1000));
				    assert(tmpLong != NULL);
				    haloIndsToExport = tmpLong;
				    NumHaloIndsToExportAlloc += 1000;
				  }
				
				haloIndsToExport[NumHaloIndsToExport] = nbr;
				++NumHaloIndsToExport;
			      }
			  }
		      }
		  }
		
		//grab all of the friends of halos on ThisTask to be sent
		Nsend = 0;
		for(i=0;i<NumHaloIndsToExport;++i)
		  {
		    nbr = (*fofd)[haloIndsToExport[i]].head;
		    while(nbr >= 0)
		      {
			if(Halos[nbr].index) //if halo is on this task, export it
			  {
			    point_domain2global(Halos[nbr].pos+0,Halos[nbr].pos+1,Halos[nbr].pos+2,domainOrigin);
			    
			    if(Nsend >= NumHalosToSendAlloc)
			      {
				tmpHalos = (Halo*)realloc(halosToSend,sizeof(Halo)*(NumHalosToSendAlloc + 1000));
				assert(tmpHalos != NULL);
				halosToSend = tmpHalos;
				NumHalosToSendAlloc += 1000;
			      }
			    
			    halosToSend[Nsend] = Halos[nbr];
			    ++Nsend;
			    
			    //mark as no longer on this task so later levels
			    //skip it and the compaction pass removes it
			    Halos[nbr].index = 0;
			  }
			
			nbr = (*fofd)[nbr].link;
		      }
		  }
		
		TotNumHalosSent += Nsend;
		
		//tell recvTask how many it is getting
		MPI_Sendrecv(&Nsend,1,MPI_LONG,recvTask,TAG_NUMDATA2_FOFEXCHNG,
			     &Nrecv,1,MPI_LONG,recvTask,TAG_NUMDATA2_FOFEXCHNG,
			     MPI_COMM_WORLD,&Stat);
	
		if(Nsend > 0 || Nrecv > 0)
		  {
#ifdef DEBUG	
#if DEBUG_LEVEL > 1
		    if(sendTask > recvTask)
		      fprintf(stderr,"%d (%ld) <-> (%ld) %d\n",sendTask,Nsend,Nrecv,recvTask);
#endif
#endif
		    
		    //add halos to be recv at end of halos list
		    tmpHalos = (Halo*)realloc(Halos,sizeof(Halo)*(Nrecv+NumHalos+NumExtraHalos));
		    assert(tmpHalos != NULL);
		    Halos = tmpHalos;
		    
		    //get the stuff
		    MPI_Sendrecv(halosToSend,(int) Nsend,MPI_Halo,recvTask,TAG_DATA2_FOFEXCHNG,
				 Halos+NumHalos+NumExtraHalos,(int) Nrecv,MPI_Halo,recvTask,TAG_DATA2_FOFEXCHNG,
				 MPI_COMM_WORLD,&Stat);
		    
		    //incoming positions are global; shift them into this
		    //task's domain coordinates
		    for(i=0;i<Nrecv;++i)
		      point_global2domain(Halos[i+NumExtraHalos+NumHalos].pos+0,
					  Halos[i+NumExtraHalos+NumHalos].pos+1,
					  Halos[i+NumExtraHalos+NumHalos].pos+2,domainOrigin);
		    
		    //record how many extra halos we get
		    NumExtraHalos += Nrecv;
		  }
	      }
	  }

	//switch which way we break ties - helps for mem load balance
	bigTask = 1 - bigTask; 
      }
    
#ifdef DEBUG
#if DEBUG_LEVEL > 1
    fprintf(stderr,"%d: NumExtraHalos = %ld\n",ThisTask,NumExtraHalos);
#endif
#endif
    
    if(NumExtraHalos > 0 || TotNumHalosSent > 0)
      {
	//need to rebuild kdTree and fofdata
	// 1) remove halos sent to other nodes from halos list - make sure to keep current halos with fof data linked properly
	// once you get to halo with Halos[i].index == 0, it has to be removed
	// remove it by sliding all halos down a slot in the vector Halos and fofd
	// when you slide the halos down, all indexes >= i+1 have to be decreased by 1 in fofd structure along with sliding the halos down a slot in both Halos and fofd
	// when you do a slide, decrement currNumHalos to mark end of Halos vec at end of sliding steps
	currNumHalos = NumHalos;
	startLoc = 0;
	do {
	  
	  //marks if a halo has been slid
	  slidAHalo = 0;
	  
	  for(i=startLoc;i<currNumHalos;++i)
	    {
	      if(Halos[i].index == 0) //do not keep this halo
		{
		  for(j=i+1;j<currNumHalos;++j)
		    {
		      Halos[j-1] = Halos[j];
		      (*fofd)[j-1] = (*fofd)[j];
		      (*px)[j-1] = (*px)[j];
		      (*py)[j-1] = (*py)[j];
		      (*pz)[j-1] = (*pz)[j];
		    }
		  
		  --currNumHalos;
		  //fix up all linked-list indexes that pointed past the
		  //removed slot
		  for(j=0;j<currNumHalos;++j)
		    {
		      if((*fofd)[j].head >= i+1)
			--((*fofd)[j].head);
		      if((*fofd)[j].tail >= i+1)
			--((*fofd)[j].tail);
		      if((*fofd)[j].link >= i+1)
			--((*fofd)[j].link);
		    }
		  
		  //start at position of halo that was removed and we slidAHalo
		  startLoc = i;
		  slidAHalo = 1;
		  break;
		}
	    }
	  
	} while(slidAHalo);
	
	//move extra halos down to new end of Halos vec
	if(currNumHalos < NumHalos)
	  for(i=0;i<NumExtraHalos;++i)
	    Halos[currNumHalos+i] = Halos[NumHalos + i];
	
	NumHalos = currNumHalos+NumExtraHalos;
	tmpHalos = (Halo*)realloc(Halos,sizeof(Halo)*NumHalos);
	assert(tmpHalos != NULL);
	Halos = tmpHalos;
	
	// 2) make px, py, pz, and fofd structures longer - init properly
	tmpFloat = (float*)realloc(*px,sizeof(float)*NumHalos);
	assert(tmpFloat != NULL);
	*px = tmpFloat;
	tmpFloat = (float*)realloc(*py,sizeof(float)*NumHalos);
	assert(tmpFloat != NULL);
	*py = tmpFloat;
	tmpFloat = (float*)realloc(*pz,sizeof(float)*NumHalos);
	assert(tmpFloat != NULL);
	*pz = tmpFloat;
	tmpFoFData = (FoFData*)realloc(*fofd,sizeof(FoFData)*NumHalos);
	assert(tmpFoFData != NULL);
	*fofd = tmpFoFData;
	//each imported halo starts as its own singleton group
	for(i=0;i<NumExtraHalos;++i)
	  {
	    (*px)[currNumHalos+i] = Halos[currNumHalos+i].pos[0];
	    (*py)[currNumHalos+i] = Halos[currNumHalos+i].pos[1];
	    (*pz)[currNumHalos+i] = Halos[currNumHalos+i].pos[2];
	    (*fofd)[currNumHalos+i].head = currNumHalos+i;
	    (*fofd)[currNumHalos+i].tail = currNumHalos+i;
	    (*fofd)[currNumHalos+i].length = 1;
	    (*fofd)[currNumHalos+i].link = -1;
	  }
	
	// 3) rebuild kdTree
	destroykdTree(*td);
	*td = buildkdTree(*px,*py,*pz,(int) NumHalos,NULL);
	
	// 4) add new halos to fofd structure
	for(i=currNumHalos;i<NumHalos;++i)
	  {
	    searchRadius = halo_exlcusion_radius(Halos[i].mass*haloFinderData.PartMass,globalMaxHaloMass*haloFinderData.PartMass);
	    append_halo_fofhalogroups(*td,*px,*py,*pz,periodic,searchRadius,nbrsRad2,nbrsInd,maxNumNbrs,*fofd,i);
	  }
      }
    
    //get total # of extra halos on all tasks - if this is > 0 then need to do at least another round of sendrecvs
    MPI_Allreduce(&NumExtraHalos,&GlobalNumExtraHalos,1,MPI_LONG,MPI_SUM,MPI_COMM_WORLD);
    
#ifdef DEBUG
    if(ThisTask == 0)
      fprintf(stderr,"%d: # of halos moved in FoF join = %ld\n",ThisTask,GlobalNumExtraHalos);
#endif
    
  } while(GlobalNumExtraHalos > 0);
  
  //clean up (free(NULL) is a no-op, but the guards keep the original style)
  if(NumHalosToSendAlloc > 0)
    free(halosToSend);
  if(NumRecvHalosAlloc > 0)
    free(recvHalos);
  if(NumHaloIndsToExportAlloc)
    free(haloIndsToExport);
  free(domainCenterX);
  free(domainCenterY);
  free(domainCenterZ);
  //bug fix: these three were leaked in the original
  free(centSepMaxX);
  free(centSepMaxY);
  free(centSepMaxZ);
}

//build local FoF groups of overlapping halos: every halo starts as its own
//singleton group, then append_halo_fofhalogroups merges the groups of any
//pair of halos whose separation is below their pairwise exclusion radius
//returns a newly allocated FoFData array of length NumHalos (caller frees)
FoFData *build_fofhalogroups(kdTreeData *td, float *px, float *py, float *pz, int periodic,
			     float globalMaxHaloMass, float **nbrsRad2, int **nbrsInd, int *maxNumNbrs)
{
  long i; //long to match NumHalos (the original int could truncate)
  FoFData *fofd;
  float searchRadius;
  
  //alloc memory for groups (malloc(0) may return NULL when NumHalos == 0)
  fofd = (FoFData*)malloc(sizeof(FoFData)*NumHalos);
  assert(fofd != NULL || NumHalos == 0);
    
  //every halo starts as a singleton group: head/tail point at itself,
  //no next link, length one
  for(i=0;i<NumHalos;++i)
    {
      fofd[i].head = i;
      fofd[i].tail = i;
      fofd[i].link = -1;
      fofd[i].length = 1;
    }
  
  //merge groups of overlapping halos; the search radius uses the global
  //max halo mass so no possible overlap partner is missed
  for(i=0;i<NumHalos;++i)
    {
      searchRadius = halo_exlcusion_radius(Halos[i].mass*haloFinderData.PartMass,globalMaxHaloMass*haloFinderData.PartMass);
      append_halo_fofhalogroups(td,px,py,pz,periodic,searchRadius,nbrsRad2,nbrsInd,maxNumNbrs,fofd,i);
    }
    
  return fofd;
}

