#include <mpi.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include "common.h"

//
//  benchmarking program
//
int main( int argc, char **argv )
{
	//
	//  process command line parameters
	//
	if( find_option( argc, argv, "-h" ) >= 0 )
	{
		printf( "Options:\n" );
		printf( "-h to see this help\n" );
		printf( "-n <int> to set the number of particles\n" );
		printf( "-o <filename> to specify the output file name\n" );
		return 0;
	}

	int n = read_int( argc, argv, "-n", 1000 );
	int mdl = read_int( argc, argv, "-mdl", 0 );	// optional manual override of the FMM depth
	char *savename = read_string( argc, argv, "-o", NULL );

	//
	//  set up MPI
	//
	int n_proc, rank;
	int left, right, periodic;
	MPI_Comm commring;
	MPI_Init( &argc, &argv );
	MPI_Comm_size( MPI_COMM_WORLD, &n_proc );
	MPI_Comm_rank( MPI_COMM_WORLD, &rank );
	MPI_Request request[2];
	MPI_Status  statuses[2];

	// Create a 1-D periodic (ring) topology for the token-passing exchange.
	// BUGFIX: reorder must be 0 here.  The rest of this program mixes `rank`
	// taken from MPI_COMM_WORLD (partitioning, Scatterv, box assignment) with
	// neighbors taken from commring (Isend/Irecv).  With reorder=1 an MPI
	// implementation is permitted to give a process a different rank in the
	// new communicator, silently breaking that correspondence.
	periodic = 1;
	MPI_Cart_create( MPI_COMM_WORLD, 1, &n_proc, &periodic, 0, &commring );
	MPI_Cart_shift( commring, 0, 1, &left, &right );

	//  allocate generic resources (only rank 0 writes the output file)
	FILE *fsave = savename && rank == 0 ? fopen( savename, "w" ) : NULL;
	particle_t *particles = (particle_t*) malloc( n * sizeof(particle_t) );

	// Derived MPI datatype covering one particle_t (6 doubles: presumably
	// x, y, vx, vy, ax, ay -- confirm against common.h).
	MPI_Datatype PARTICLE;
	MPI_Type_contiguous( 6, MPI_DOUBLE, &PARTICLE );
	MPI_Type_commit( &PARTICLE );

	//  set up the data partitioning across processors:
	//  partition_offsets[i] is the index of the first particle owned by rank i,
	//  partition_offsets[n_proc] == n, so sizes fall out as adjacent differences.
	int particle_per_proc = (n + n_proc - 1) / n_proc;
	int *partition_offsets = (int*) malloc( (n_proc+1) * sizeof(int) );
	for( int i = 0; i < n_proc+1; i++ )
		partition_offsets[i] = min( i * particle_per_proc, n );

	int *partition_sizes = (int*) malloc( n_proc * sizeof(int) );
	for( int i = 0; i < n_proc; i++ )
		partition_sizes[i] = partition_offsets[i+1] - partition_offsets[i];

	//  allocate storage for local partition
	int nlocal = partition_sizes[rank];
	particle_t *local = (particle_t*) malloc( nlocal * sizeof(particle_t) );

	//  initialize and distribute the particles (that's fine to leave it unoptimized)
	set_size( n );
	if( rank == 0 )
		init_particles( n, particles );
	MPI_Scatterv( particles, partition_sizes, partition_offsets, PARTICLE, local, nlocal, PARTICLE, 0, MPI_COMM_WORLD );

	// Bring the full particle set to every rank so each one can build its
	// own copy of the FMM tree.
	MPI_Allgatherv( local, nlocal, PARTICLE, particles, partition_sizes, partition_offsets, PARTICLE, MPI_COMM_WORLD );

	//
	// build own tree using the particles sent
	//

	mp_box ***boxes;	// boxes[level][x][y]: all boxes/simulation domains, per FMM level
	int depth_level;	// The number of fast multipole method levels

	// Choose the tree depth: either the user-supplied -mdl value, or a
	// heuristic ceil(log10(n)) levels when none was given.
	if(mdl == 0)
		depth_level = ceil(log10(n));
	else
		depth_level = mdl;
	if( rank == 0 )	// print the diagnostic once, not once per rank
		printf("depth_level = %d \n", depth_level);

	// Each layer is a 2 dimensional array containing boxes:
	// level 0 has 1 box, level 1 has 4, level 2 has 16, and in general
	// level k has 4^k boxes (standard FMM quadtree refinement).
	boxes = new mp_box**[depth_level + 1];	// top-level array of per-level grids

	// box_list[level] holds the (x, y) indices of every box on that level this
	// process is responsible for.  Precomputing it lets mp_fmm iterate over
	// ready-made work lists instead of rediscovering its boxes each step.
	vector < vector < pair < int, int > > > box_list;
	for(int i = 0; i <= depth_level; i++)
	{
		vector < pair<int, int > > dummy_v;
		box_list.push_back(dummy_v);
	}

	//
	// Begin to initialize world and boxes
	//

	// Creates the root box (level 0) and loads every particle into it.
	mp_box world;
	mp_init_world(&world);
	for(int i = 0; i < n; i++ )
		world.add_new(particles[i]);

	//
	// Initialize the world and sort the particles into the boxes
	//
	// Allocate the per-level 2-D box grids.
	mp_initializations(boxes, depth_level);
	// Link the root's 4 level-1 children and hand them their particles.
	mp_split(world, boxes, 0, 0);
	// Recursively link every remaining box with its children and finish
	// distributing the particles down to the leaves by coordinate.
	mp_build_tree(boxes, 1, depth_level, 0, 0, 1, 1);

	// Work assignment: the 16 level-2 boxes (indices 0..15, row-major as
	// index = 4*x + y) are divided as evenly as possible among the processes,
	// so at most 16 processes do useful work.  [index, next_index) is this
	// rank's half-open slice of that range.
	int avg = 16 / n_proc;
	int mod = 16 % n_proc;

	int index;
	int next_index;

	if(mod == 0)
	{
		// Even split.
		index = rank * avg;
		next_index = (rank + 1) * avg;
	}else if(rank < mod)
	{
		// The first `mod` ranks take one extra box each.
		index = rank * (avg + 1);
		next_index = (rank + 1) * (avg + 1);
	} else
	{
		index = mod * (avg + 1) + (rank - mod) * avg;
		next_index = mod * (avg + 1) + (rank - mod + 1) * avg;
	}

	// (x, y) of the first level-2 box in this rank's slice.
	int l2x = index / 4;
	int l2y = index % 4;

	//--------------------------------------------------------

	//
	// Buffers for the ring exchange of moved particles.  Each holds up to n
	// particles plus one sentinel "dummy" particle (x == -1) marking the end
	// of the valid data, like the NUL byte terminating a string.
	//
	particle_t *send = (particle_t*) malloc ( (n + 1) * sizeof(particle_t));
	particle_t *recv = (particle_t*) malloc ( (n + 1) * sizeof(particle_t));
	particle_t dummy;
	memset( &dummy, 0, sizeof(dummy) );	// BUGFIX: don't transmit indeterminate bytes
	dummy.x = -1;

	// Walk the level-2 boxes assigned to this process.  For each one,
	// mp_find_bottom_box() descends to its bottom-level descendants and
	// records them in this process's box_list.
	int tcx = l2x;
	int tcy = l2y;
	for(int nn = next_index; index < nn; nn--)	// runs next_index - index times
	{
		if(tcy > 3)	// wrap to the next row of the 4x4 level-2 grid
		{
			tcy = 0;
			tcx++;
		}
		mp_find_bottom_box(boxes[2][tcx][tcy], boxes, box_list, depth_level, tcx, tcy);
		tcy++;
	}

	//
	//  simulate a number of time steps
	//
	double simulation_time = read_timer( );

	for( int step = 0; step < NSTEPS; step++ )
	{
		//  save current step if necessary (slightly different semantics than in other codes)
		if( fsave && (step%SAVEFREQ) == 0 )
			mp_save( fsave, n, world.element );

		// Compute forces on this rank's boxes with the Fast Multipole Method.
		mp_fmm(boxes, depth_level, box_list);

		// Wait for everyone to finish their FMM, then clear the root box so
		// it can be refilled with the post-move particle positions.
		MPI_Barrier(MPI_COMM_WORLD);
		world.element.clear();

		// For every bottom-level box owned by this rank (listed in
		// box_list[depth_level]): move each particle, copy it into the send
		// buffer, reset its accelerations, and re-add it to the root box for
		// resorting once the other ranks' particles have been received.

		// Number of particles staged in the send buffer.
		int count = 0;
		for( size_t i = 0; i < box_list[depth_level].size(); i++ )
		{
			int boxx = box_list[depth_level][i].first;
			int boxy = box_list[depth_level][i].second;
			for( size_t j = 0; j < boxes[depth_level][boxx][boxy].element.size(); j++ )
			{
				// move all particles in the base level
				move(boxes[depth_level][boxx][boxy].element[j]);

				// refill the send buffer
				send[count] = boxes[depth_level][boxx][boxy].element[j];

				// clear the accumulated forces for the next step
				boxes[depth_level][boxx][boxy].element[j].ax = boxes[depth_level][boxx][boxy].element[j].ay = 0;

				// Update the world with cleared particles
				world.add_new(boxes[depth_level][boxx][boxy].element[j]);

				count++;
			}
		}
		send[count] = dummy;	// sentinel marking the end of the buffer

		// Token-passing ring exchange: in n_proc - 1 hops, each process
		// receives a buffer from its left neighbor, keeps a copy of the
		// particles, and forwards them to its right neighbor -- so every
		// process eventually sees every other process's particles.
		for( int i = 0; i < n_proc - 1; i++ )
		{
			// (The loop bound i < n_proc - 1 already guarantees these
			// communications happen exactly n_proc - 1 times; the original
			// redundant guards on i have been removed.)
			MPI_Isend(send, (n + 1) * sizeof(particle_t), MPI_BYTE, right, i, commring, &request[0]);
			MPI_Irecv(recv, (n + 1) * sizeof(particle_t), MPI_BYTE, left, i, commring, &request[1]);

			// Wait until both the send and the receive have completed before
			// touching either buffer.
			MPI_Waitall( 2, request, statuses );

			// Absorb the received particles into the world and stage them for
			// forwarding, stopping at the sentinel (x == -1; exact copy of
			// dummy.x, so the floating-point comparison is reliable).
			int k = 0;
			while (recv[k].x != dummy.x )
			{
				send[k] = recv[k];
				world.add_new(recv[k]);
				k++;
			}

			// Re-terminate the outgoing buffer with the sentinel.
			send[k] = dummy;
		}

		// Sort all the elements received in the world into its 4 children...
		mp_sort_elements(world, boxes, 0, 0);

		// ...then let the children push the particles down to the leaves.
		mp_assign_elements(boxes, 1, depth_level, 0, 0, 1, 1);
		MPI_Barrier(MPI_COMM_WORLD);
	}
	simulation_time = read_timer( ) - simulation_time;

	if( rank == 0 )
		printf( "n = %d, n_procs = %d, simulation time = %g s\n", n, n_proc, simulation_time );

	//
	//  release resources
	//
	// NOTE(review): `boxes` and the per-level grids allocated inside
	// mp_initializations are intentionally not freed here -- their layout is
	// owned by the mp_* module and no matching teardown routine is visible.
	free( send );
	free( recv );
	free( partition_offsets );
	free( partition_sizes );
	free( local );
	free( particles );
	if( fsave )
		fclose( fsave );

	MPI_Type_free( &PARTICLE );
	MPI_Finalize( );

	return 0;
}
