#include <mpi.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include "common.h"
#include <list>
#include <math.h>
#include <string.h>
#include <omp.h>

//#define DEBUG 1

//#define mpilog2(fmt, ...) {printf("[%02d]>>> " fmt, rank, ##__VA_ARGS__); fflush(stdout);}
//#define stop MPI_Barrier(MPI_COMM_WORLD)

#define mpilog2(fmt, ...)
#define stop 

#ifdef DEBUG
#define mpilog(fmt, ...) {printf("[%02d]>>> " fmt, rank, ##__VA_ARGS__); fflush(stdout);}
#else
#define mpilog(fmt, ...)
#endif

using namespace std;

// Sentinel "null" particle marking an empty slot in the flat particle
// arrays. The convention throughout this file is: x < 0 means "hole"
// (see count_non_null_particles and sanitize_array).
particle_t nullp = { -1.0, -1.0, 0.0, 0.0, 0.0, 0.0, -1 };

/* Given an array ARR, returns how many particles are not null, starting
 * from the beginning. 
 */
/* Given an array ARR, returns how many particles are not null, starting
 * from the beginning. A particle is "null" when its x coordinate is
 * negative (see the nullp sentinel).
 *
 * Fix: test the bound BEFORE dereferencing. The original evaluated
 * arr[t].x first, so a fully-populated array read one element past the
 * end before the t<max check could stop the loop.
 */
int count_non_null_particles (particle_t *arr, int max) {

    int t = 0;
    while (t < max && arr[t].x >= 0)
        t++;
    return t;
}

/* Fills the first n elements of the array with the null particle */
/* Overwrites the first n slots of the array with the null sentinel,
 * marking them all as empty. */
void initialize_null (particle_t *arr, int n) {
    particle_t *end = arr + n;
    for (particle_t *slot = arr; slot != end; ++slot)
        *slot = nullp;
}


// Sanitizes (get's rid of the null holes) LOCAL of length LEN
// Returns the number of non-null particles
int sanitize_array (particle_t *local, int len) {

    // l is the limit on left, which we know is null free for sure
    // r is the limit on right, which we know for sure is null
    int l=-1, r=len-1;
   
    while (l<=r) {
        l++;
        if (local[l].x < 0 ){ 
	    // find first non-null from the right
            while(local[r].x < 0){
	        r--;
                if (r<=l)
                    return l;
            }
            local[l] = local[r];
            local[r] = nullp;
        }
    } 

    return l;
} 

/* Divvy up particles into strips.

 * The particles are in SRC array to begin with on rank=0. At the end, the
 * particles end up in DST array on each of the N_PROC processors. SRC has
 * at most N non-null particles, and the size of each DST should be enough
 * to hold the non-null particles.
 * SRC becomes null.
 */

/* Divvy up particles into strips.

 * The particles are in SRC array to begin with on rank=0. At the end, the
 * particles end up in DST array on each of the N_PROC processors. SRC has
 * at most N non-null particles, and the size of each DST should be enough
 * to hold the non-null particles.
 * SRC becomes null on every rank.
 *
 * Collective: every rank in COMM must call this (Bcast + Scatterv).
 * stripDisp/stripCount are only meaningful at the root, which is all
 * MPI_Scatterv requires; the broadcast gives each rank its own count.
 */

void divvy_strips(particle_t *src, particle_t *dest,
                        int n, int n_proc, int stripH,
                        int rank, MPI_Datatype datatype, 
                        MPI_Comm comm){

    int stripDisp[n_proc], stripCount[n_proc];

    if (rank == 0) {

        // Compact SRC; sanitize_array already returns the number of
        // live particles, so no separate counting pass is needed.
        int num = sanitize_array (src, n);

        // Bucket each particle into the strip (processor) that owns
        // its y range. Each strip is stripH boxes of height cutoff.
        list<particle_t> tempLists[n_proc];
        for (int i = 0; i < num; i++){
            int listnum = (int)((src[i].y)/(stripH*cutoff));
            // Clamp: a particle sitting exactly on the domain's bottom
            // edge (y == n_proc*stripH*cutoff) would otherwise index
            // one past the last strip.
            if (listnum < 0) listnum = 0;
            if (listnum > n_proc - 1) listnum = n_proc - 1;
            tempLists[listnum].push_back(src[i]);
        }

        // Rewrite SRC strip-by-strip (strip i's particles before strip
        // i+1's) and record each strip's offset and length for the
        // scatter below.
        int p0 = 0;
        for (int i = 0; i < n_proc; i++){
            stripDisp[i] = p0;
            stripCount[i] = tempLists[i].size();
            for (list<particle_t>::iterator iter = tempLists[i].begin();
                        iter != tempLists[i].end(); ++iter){
                src[p0] = *iter;
                p0++;
            }
            // Let's be a little sympathetic with the memory manager
            tempLists[i].clear();
        }
    }
    // Let's do some MPI ju-jitsu to send data to the individual
    // processors.
    mpilog(" [divvy_strips] reaching barrier just before communication\n");
    MPI_Barrier (comm);
    
    // send stripCount array to all processors; each rank needs its own
    // receive count for the scatter
    MPI_Bcast(stripCount, n_proc, MPI_INT, 0, comm);

    // give each strip its particles 
    MPI_Scatterv(src, stripCount, stripDisp,
                datatype, dest, stripCount[rank],
                datatype, 0, comm);


    // This is the point where all the master has already distributed
    // particles to all the processors. An implicit barrier is already
    // there, but we can add an explicit one too, just in case
    MPI_Barrier (comm);

    // no harm in cleaning the src. We do not want random data to be lying
    // around. So its better to move particles rather than copying it.
    initialize_null (src, n);
}



//  benchmarking program
//  benchmarking program: MPI strip decomposition of the particle
//  simulation. Rank 0 initializes all particles; the domain is cut into
//  horizontal strips of boxes, one strip per rank. Each step, ranks
//  exchange boundary rows with their vertical neighbors, compute forces
//  per box, move particles, and funnel escapees back through rank 0
//  for redistribution.
int main( int argc, char **argv )
{  
    //  process command line parameters
    if( find_option( argc, argv, "-h" ) >= 0 )
    {
        printf( "Options:\n" );
        printf( "-h to see this help\n" );
        printf( "-n <int> to set the number of particles\n" );
        printf( "-o <filename> to specify the output file name\n" );
        return 0;
    }
    
    int n = read_int( argc, argv, "-n", 1000 );
    char *savename = read_string( argc, argv, "-o", NULL );
    
    //  set up MPI
    int n_proc, rank;
    MPI_Init( &argc, &argv );
    MPI_Comm_size( MPI_COMM_WORLD, &n_proc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
   
    mpilog2("Hello 0\n"); stop; 
    //  allocate generic resources
    FILE *fsave = savename ? fopen( savename, "w" ) : NULL;
    particle_t *particles = (particle_t*) malloc( n * sizeof(particle_t) );
    
    // Describe particle_t to MPI as 7 contiguous doubles.
    // NOTE(review): assumes particle_t is laid out as exactly 7 doubles
    // (x, y, vx, vy, ax, ay, globalID) -- confirm against common.h.
    MPI_Datatype PARTICLE;
    MPI_Type_contiguous( 7, MPI_DOUBLE, &PARTICLE ); 
    MPI_Type_commit( &PARTICLE );
    
    //  allocate storage for local partition
    int nlocal = (n/n_proc)*3; // allocate space for 3x the average num of particles in a processor
    
    //  initialize and distribute the particles 
    set_size (n);
    double size = sqrt(density*n);
    int numBoxes =(int) ceil(size/cutoff);
    // Figure out box dimensions corresponding to each processor:
    // each rank owns a horizontal strip stripH boxes tall, stripW wide.
    int stripH = (int) ceil((double)numBoxes/(double)n_proc);
    int stripW = numBoxes;


    // Create all the containers that will hold the particles.
    
    // LOCAL is the local copy of the particles that belong to this
    // processor in the current step of simulation.
    particle_t *local = (particle_t*) malloc( nlocal * sizeof(particle_t) );
    initialize_null (local, nlocal);
    // BOXES is the 2D array of lists which helps O(n) algorithm.
    // Rows 1..stripH are our own boxes; row 0 and row stripH+1 are ghost
    // rows filled with the neighbors' boundary particles.
    list<particle_t> boxes[stripW][stripH+2];
    // This is a fair estimate of how many particles can be there in one
    // horizontal row of boxes
    int maxRowParticles = nlocal/stripH;
    // TOPBOXES is an array of particles in the topmost row of boxes that
    // affects the neighbor above you.
    particle_t topBoxes[maxRowParticles];
    // BOTTOMBOXES is an array of particles in  the bottommost row of boxes
    // that affects the neighbor below you.
    particle_t bottomBoxes[maxRowParticles];
    // TOPNEIGHBORS is an array of particles from the neighbor above which
    // affect our particles.
    particle_t topNeighbors[maxRowParticles];
    // BOTTOMNEIGHBORS is an array of particles from the neighbor below
    // which affect our particles.
    particle_t bottomNeighbors[maxRowParticles];
    // BANNEDP is an array of particles this processor disowned at the end
    // of simulation step.
    particle_t bannedP[maxRowParticles];
    // REFUGEES is an array of particles this processor received from the
    // god (rank 0) at the beginning of this step of simulation.
    particle_t refugees[maxRowParticles];

    mpilog2("Hello 1\n"); stop; 
    // Initialize particles on processor 0, and send them to others.
    if( rank == 0 )
        init_particles( n, particles );

    divvy_strips(particles, local, n, n_proc, stripH, rank, PARTICLE, 
                        MPI_COMM_WORLD);


    mpilog2("Hello 2\n"); stop; 

    //  simulate a number of time steps
    double simulation_time = read_timer( );
    for( int step = 0; step < NSTEPS; step++ )
    {

        mpilog2("Hello 3 %d\n", step); stop; 
        // At the beginning of the loop, LOCAL is the list of particles we
        // need to be concerned about in this simulation step. 
        // i.e.: LOCAL = LOCAL(from last simulation step) - bannedP + refugees

        sanitize_array (local, nlocal);

        // Let's clean the house. We have a party very soon
        {
            initialize_null (topBoxes, maxRowParticles);
            initialize_null (bottomBoxes, maxRowParticles);
            initialize_null (topNeighbors, maxRowParticles);
            initialize_null (bottomNeighbors, maxRowParticles);
            initialize_null (bannedP,maxRowParticles);
            initialize_null (particles, n);  
            initialize_null (refugees,maxRowParticles);
    
            // Let's clean up before we begin
            for (int i=0; i<stripW; i++)
                for (int j=0; j<stripH+2; j++)
                    boxes[i][j].clear();

        }

        mpilog2("Hello 4 %d\n", step); stop; 


        // numParticles are the particles which this processor can MOVE.
        int numParticles = count_non_null_particles (local, nlocal);
        mpilog("numParticles: %d, maxRowParticles: %d, stripH: %d, stripW: %d\n", 
                    numParticles, maxRowParticles, stripH, stripW);
    	
        // Let's fill up the boxes. y is offset by +1 because row 0 is the
        // ghost row reserved for the neighbor above.
	    for (int i = 0; i < numParticles; i++){
		    int x = (int)(local[i].x/cutoff);
    		int y = 1 + (int)(local[i].y/cutoff)-stripH*rank; 
	    	boxes[x][y].push_back(local[i]);
    	}	


        // Send gifts to neighbors: boundary rows, exchanged up then down.
        {
    
            // Fill up topBoxes, so that we can parcel it to our dear
            // neighbor.
            if (rank != 0) {
    
                int temp = 0;    
                for (int i =0; i < stripW; i++){
                    list<particle_t>::iterator iter;
                    for (iter = boxes[i][1].begin(); iter != boxes[i][1].end(); iter++) {
                        // copy the box contents from the first row of boxes to the
                        // array topboxes
                        topBoxes[temp] = *iter;
                        temp++;
                    }
                }
            }
   
            // Fill up bottomBoxes, so that we can parcel it to our dear
            // neighbor.
            if (rank != n_proc-1) {
    
                int temp = 0;    
                for (int i =0; i < stripW; i++){
                    list<particle_t>::iterator iter;
        
                    for (iter = boxes[i][stripH].begin(); iter != boxes[i][stripH].end(); iter++) {
                        // copy the box contents from the last row of boxes to the
                        // array bottomboxes
                        bottomBoxes[temp] = *iter;
                        temp++;
                    }
                }
            }
    
            // Send our neighbors information about neighboring boxes -- first row and last row.
            // The full maxRowParticles buffer is always sent; the receiver
            // relies on the null-sentinel padding to find the real count.
            MPI_Request request0, request1, request2, request3;
            MPI_Status status0, status1, status2, status3;
    
            if (rank != 0) {
                // send the topBoxes to the neighbor above you
                mpilog("Sending %d (total) particles to rank %d\n", (maxRowParticles), rank-1);
                MPI_Isend(topBoxes, (maxRowParticles), PARTICLE, (rank-1), rank*2,
                                MPI_COMM_WORLD, &request0);
            }   
    
            if (rank != n_proc-1) {
                // receive particles just sent by your neighbor
                mpilog("Receiving %d (total) particles from rank %d\n", (maxRowParticles), rank+1);
                MPI_Irecv(bottomNeighbors, (maxRowParticles), PARTICLE, (rank+1),
                                (rank+1)*2, MPI_COMM_WORLD, &request1);
    
            }
    
            if (rank !=0) MPI_Wait(&request0, &status0);
            if (rank != n_proc-1) MPI_Wait(&request1, &status1);
    
            MPI_Barrier(MPI_COMM_WORLD);
            mpilog("Top boxes sent up successfully\n");
    
            mpilog("I got %d non-null particles from the strip below me (%d)\n", 
                        count_non_null_particles(bottomNeighbors,
                            maxRowParticles), rank+1);
    
            if (rank != n_proc-1) {
                // send the bottomBoxes to the neighbor below you
                mpilog("Sending %d (total) particles to rank %d\n", (maxRowParticles), rank+1);
                MPI_Isend(bottomBoxes, (maxRowParticles), PARTICLE, (rank+1),
                                rank*2+1, MPI_COMM_WORLD, &request2);
            }
    
            if (rank != 0) {
                // receive particles just sent by your neighbor
                mpilog("Receiving %d (total) particles from rank %d\n", (maxRowParticles), rank-1);
                MPI_Irecv(topNeighbors, (maxRowParticles), PARTICLE, (rank-1),
                                rank*2 - 1, MPI_COMM_WORLD, &request3);
    
            }
    
            if (rank != n_proc-1) MPI_Wait(&request2, &status2);
            if (rank != 0) MPI_Wait(&request3, &status3);
    
            MPI_Barrier(MPI_COMM_WORLD);
            mpilog("Bottom boxes sent down successfully\n");
    
            mpilog("I got %d non-null particles from the strip above me (%d)\n",
                            count_non_null_particles(topNeighbors,
                                maxRowParticles), rank-1);
    
        }

        // Now we need to copy the particles we just received into
        // appropriate boxes. This is setting up the first and the last row
        // (the ghost rows) of our 2D boxes array.
        int topCount = count_non_null_particles(topNeighbors, maxRowParticles);
        int bottomCount = count_non_null_particles(bottomNeighbors, maxRowParticles);

        for (int i=0; i<topCount; i++) {
            // Copy this to the list of boxes we have set up
            int x = (int) (topNeighbors[i].x/cutoff);
            boxes[x][0].push_back(topNeighbors[i]);
        }

        for (int i=0; i<bottomCount; i++) {
            // Copy this to the list of boxes we have set up
            int x = (int) (bottomNeighbors[i].x/cutoff);
            boxes[x][stripH+1].push_back(bottomNeighbors[i]);
        }

        //  compute all forces: each particle interacts with the contents
        //  of its own box and the (up to) 8 surrounding boxes. Threads
        //  only read BOXES and write their own local[i], so the omp loop
        //  is race-free.
        #pragma omp parallel for
        for( int i = 0; i < numParticles; i++ )
        {
            local[i].ax = local[i].ay = 0;
            
            int currBox_x = (int) (local[i].x/cutoff);
            int currBox_y = 1 + (int) (local[i].y/cutoff) - (stripH*rank);

            for (int I = max(0, 1-currBox_x); I < 3 && currBox_x-1+I<stripW; I++) {
                for(int J = max(0,1-currBox_y); J < 3 && currBox_y-1+J<stripH+2; J++){

                    for (list<particle_t>::iterator iter = boxes[currBox_x-1+I][currBox_y-1+J].begin(); 
                                iter != boxes[currBox_x-1+I][currBox_y-1+J].end(); iter++) {

                        apply_force(local[i], *iter);

                    }
                }
            }
        }
        
        //  move particles
        #pragma omp parallel for
        for( int i = 0; i < numParticles; i++ )
            move( local[i] );

        // Remove particles that no longer belong to this strip (their y
        // left our [stripH*cutoff*rank, stripH*cutoff*(rank+1)) band).
        for( int i = 0, temp=0; i < numParticles; i++) {
            if (local[i].y < stripH*cutoff*rank ||
                    local[i].y >= stripH*cutoff*(rank+1)) {

                bannedP[temp] = local[i];
                temp++;
                local[i] = nullp;
            } 
        }

        // Fill the holes that we just created in LOCAL
        sanitize_array(local, nlocal);

        MPI_Barrier (MPI_COMM_WORLD);
        mpilog ("Now sending list of disowned particles to the god\n");

        // send particles to the root.
        // NOTE(review): this deposits n_proc*maxRowParticles entries into
        // PARTICLES, which was malloc'd for n -- confirm that
        // n >= n_proc*maxRowParticles holds for all n/n_proc combinations.
        MPI_Gather (bannedP, maxRowParticles, PARTICLE,
                        particles, maxRowParticles, PARTICLE,
                        0, MPI_COMM_WORLD);  

        MPI_Barrier (MPI_COMM_WORLD);
        // NOTE(review): missing ';' after this mpilog call -- harmless
        // because the macro expands to nothing (or to a braced block
        // when DEBUG is defined), but worth tidying.
        mpilog ("Rank 0 should have received the banished particles now.\n")


        divvy_strips(particles, refugees, n, n_proc, stripH, rank, PARTICLE, 
                        MPI_COMM_WORLD);
       
        MPI_Barrier(MPI_COMM_WORLD);

        // Need to add refugee particles to local, appending them after
        // the surviving (compacted) particles.
        int refugee_count = count_non_null_particles (refugees, maxRowParticles);
        int local_minus_banned = count_non_null_particles (local, nlocal);
        for (int i=0; i<refugee_count; i++) {
            local[local_minus_banned + i] = refugees[i];
        }


        // LOCAL is now LOCAL-BANNED+REFUGEES

        //  save current step if necessary
        if( fsave && (step%SAVEFREQ) == 0 ) {
       
            particle_t *saving_particle_ryan = 
                    (particle_t *) malloc(sizeof(particle_t)*n_proc*nlocal);

            // Gather all the particles on the master
            MPI_Gather (local, nlocal, PARTICLE, 
                        saving_particle_ryan, nlocal, PARTICLE,
                        0, MPI_COMM_WORLD);

            // compact the gathered array (each rank's chunk is padded
            // with nulls) before writing
            if (rank == 0) {
                sanitize_array (saving_particle_ryan,
                                                n_proc*nlocal);

                
                // save the stuff
                int dadada = count_non_null_particles(saving_particle_ryan,
                                                n_proc*nlocal);
 
                for (int i =0; i<dadada ; i++)
                    fprintf(fsave, "%04d%04d %10.8f %10.8f\n", 
                            step,
                            saving_particle_ryan[i].globalID,
                            saving_particle_ryan[i].x,
                            saving_particle_ryan[i].y);

            }

            free(saving_particle_ryan);
        }

    }
    simulation_time = read_timer( ) - simulation_time;
    
    if( rank == 0 )
        printf( "n = %d, n_procs = %d, simulation time = %g s\n", n, n_proc, simulation_time );
    
    // release resources
    free( local );
    free( particles );
    if( fsave )
        fclose( fsave );
    
    MPI_Finalize( );
    
    return 0;
}
