#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "mpi.h"
#include "oom.h"
#include "cm.h"
#include "env.h"
#include "dtypes.h"
#include "log.h"
/** \file coll.c
 * \brief It contains all the structures for collective communication
 */

/*
 * ************************************
 * \author James Edmondson
 * \brief Used to get parent and children information for
 *        tree-based collective communications involving an
 *        arbitrary tree root. Group rank and
 *        size must be provided, and the function will compute
 *        the parent, and two children (if necessary). Most
 *        tree-based communications involve receiving from the parent
 *        and sending to the children.
 ***********************************
 */

int GetTreeMetrics(int root, int rank, int size, int * parent,
		        int * child1, int * child2)
{
	int logical;	/* 1-based position of this rank in the tree */

	*parent = MPI_UNDEFINED;
	*child1 = MPI_UNDEFINED;
	*child2 = MPI_UNDEFINED;

	/* Map the group rank onto a logical 1..size numbering in which the
	 * requested root occupies logical position 1. */
	logical = (rank >= root) ? rank - root + 1 : size - root + rank + 1;

	/* Apply the standard binary-heap parent/child relationships to the
	 * logical numbering; a relative that would fall outside the group
	 * stays MPI_UNDEFINED. */
	if( logical > 1 )
		*parent = logical / 2;
	if( logical * 2 <= size )
		*child1 = logical * 2;
	if( *child1 != MPI_UNDEFINED && logical * 2 + 1 <= size )
		*child2 = logical * 2 + 1;

	/* Translate the logical positions back into group ranks. */
	if( *parent != MPI_UNDEFINED )
		*parent = (root + *parent - 1) % size;
	if( *child1 != MPI_UNDEFINED )
		*child1 = (root + *child1 - 1) % size;
	if( *child2 != MPI_UNDEFINED )
		*child2 = (root + *child2 - 1) % size;

	debug("rank = %d, parent = %d, child1 = %d, child2 = %d\n",
			logical,*parent,*child1,*child2);

	return MPI_SUCCESS;
}


/*
 * ************************************
 * \author Shashi Thakur, James Edmondson
 * \brief Creates a barrier condition amongst processes in a Communicator.
 *        Shashi originally implemented a version of this that would
 *        not compile. Currently, the implementation is considered sketchy,
 *        as roughly 1 in 10 runs it manages to hang on 6+ processes.
 ***********************************
 */

int MPI_Barrier(MPI_Comm comm)
{
	/* A barrier is emulated with two zero-length broadcasts rooted at
	 * rank 0: every process must pass through both collective rounds
	 * before any process can leave the barrier.
	 *
	 * NOTE(review): the original declared "int buff[0]" -- a zero-length
	 * array, which is a constraint violation in ISO C and compiles only
	 * as a GCC extension.  A one-element buffer is used instead; since
	 * the count is 0, no data is actually transferred. */
	int buff[1];
	int count = 0;

	MPI_Bcast((void *)buff, count, MPI_INT, 0, comm);
	MPI_Bcast((void *)buff, count, MPI_INT, 0, comm);

	return MPI_SUCCESS;
}

/*
 * ************************************
 * \author James Edmondson
 * \brief Sends a message from a root node to all other processes
 *        in that communicator.
 ***********************************
 */

/**
 * Broadcasts count elements of datatype from root to every other process
 * in comm, using the binary tree computed by GetTreeMetrics: each process
 * receives from its parent, then forwards to up to two children.
 *
 * Returns MPI_SUCCESS, or MPI_ERR_COMM if the communicator or its group
 * cannot be resolved.
 */
int MPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root,
	MPI_Comm comm)
{
	Comm *real_comm;
	Group *real_group = NULL;
	MPI_Request request1, request2;
	Request * real_request1 = NULL, * real_request2 = NULL;
	int my_group_rank;
	int tsource = root;
	int tag = MPI_ANY_TAG;
	int parent, child1, child2;
	int ierr;

	real_comm = OOM_comm_lookup(comm);
	if (real_comm == NULL) return MPI_ERR_COMM;
	real_group = OOM_group_lookup(real_comm->group);
	if (real_group == NULL) return MPI_ERR_COMM;

	my_group_rank = real_comm->source;

	/* Locate this process inside a binary tree rooted at "root";
	 * missing relatives come back as MPI_UNDEFINED. */
	GetTreeMetrics(root,my_group_rank,real_group->num_members,
			&parent,&child1,&child2);

	/* Every non-root process first receives the payload from its
	 * parent before forwarding it further down the tree. */
	if( parent != MPI_UNDEFINED )
	{
		CM_Recv(buf,count,datatype,parent,my_group_rank,
				real_comm->coll_context,tag,&request1);
		debug("Receiving from parent (%d)\n",parent);
		real_request1 = OOM_request_lookup(request1);
		CM_Wait(real_request1,&tsource,&tag,&ierr);
		debug("Finished Receive from parent (%d)\n",parent);
	}

	real_request1 = NULL;
	real_request2 = NULL;

	debug("Starting my sends to child1(%d) and child2(%d)\n",child1,child2);

	/* Forward to both children asynchronously so the two transfers can
	 * complete in either order; a synchronous send to child1 would
	 * serialize the start of child2's transfer. */
	if( child1 != MPI_UNDEFINED )
	{
		CM_Send(buf,count,datatype,my_group_rank,child1,
				real_comm->coll_context,tag,real_group->members[child1],
				&request1);
		real_request1 = OOM_request_lookup(request1);
	}
	if( child2 != MPI_UNDEFINED )
	{
		CM_Send(buf,count,datatype,my_group_rank,child2,
				real_comm->coll_context,tag,real_group->members[child2],
				&request2);
		real_request2 = OOM_request_lookup(request2);
	}

	debug("About to wait on my sends to child1(%d) and child2(%d)\n",
			child1,child2);

	/* Wait on whichever child sends were actually started. */
	if( real_request1 != NULL )
	{
		debug("Waiting on child1 send...\n");
		CM_Wait(real_request1,&tsource,&tag,&ierr);
	}
	if( real_request2 != NULL )
	{
		debug("Waiting on child2 send...\n");
		CM_Wait(real_request2,&tsource,&tag,&ierr);
	}

	return MPI_SUCCESS;
}

/*
 * ************************************
 * \author Zach Lowry, James Edmondson
 * \brief Gathers data from all process to some root node in a
 *        Communicator. Almost all of this was written by Zach.
 *        James wrote the section that copies the data from the
 *        root's send buffer into the proper place in the recv buffer.
 ***********************************
 */


/*
 * Gathers sendcount elements from every process into root's recvbuf,
 * placed at rank-order offsets.  Non-root processes send and wait;
 * the root copies its own contribution locally and collects the rest.
 */
int MPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
	void *recvbuf, int recvcount, MPI_Datatype recvtype, int root,
	MPI_Comm comm)
{
	Comm *comm_info;
	Group *group_info;
	Request *wait_req;
	MPI_Request send_req;
	Request *recv_reqs = NULL;
	int me;
	int src = root;
	int tag = 1;
	int err;

	comm_info = OOM_comm_lookup(comm);
	if (comm_info == NULL) return MPI_ERR_COMM;
	group_info = OOM_group_lookup(comm_info->group);
	if (group_info == NULL) return MPI_ERR_COMM;

	me = comm_info->source;

	if( root != me )
	{
		/* Contributors simply ship their send buffer to the root
		 * and block until the transfer completes. */
		CM_Send(sendbuf,sendcount,sendtype,me,root,
				comm_info->coll_context,1,group_info->members[root],
				&send_req);
		wait_req = OOM_request_lookup(send_req);
		CM_Wait(wait_req,&src,&tag,&err);
		return MPI_SUCCESS;
	}

	/* Root: place its own contribution directly into its slot of the
	 * receive buffer... */
	{
		char * dest = recvbuf;
		int slot = sizeof_base_type(recvtype) * me * recvcount;
		memcpy(&dest[slot],sendbuf,recvcount * sizeof_base_type(recvtype));
	}

	/* ...then collect every other process's contribution. */
	CM_RecvAll(recvbuf,recvcount,recvtype,root,comm_info,&recv_reqs);

	return MPI_SUCCESS;
}

/*
 * ************************************
 * \author Vijaya Dudekonda, James Edmondson
 * \brief Scatters data from a root node to all other processes in the
 *        Communicator. Written by both authors during a Meeting of the
 *        Minds.
 ***********************************
 */

/*
 * Distributes consecutive sendcount-sized chunks of root's sendbuf to
 * each process in rank order; the root copies its own chunk locally.
 */
int MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
	void *recvbuf, int recvcount, MPI_Datatype recvtype, int root,
	MPI_Comm comm)
{
	Comm *comm_info;
	Group *group_info;
	Request *wait_req;
	MPI_Request req;
	int me;
	int src = root;
	int nprocs;
	int tag = 1;
	int err;
	int i;
	int offset;
	char * outgoing = sendbuf;

	comm_info = OOM_comm_lookup(comm);
	if (comm_info == NULL) return MPI_ERR_COMM;
	group_info = OOM_group_lookup(comm_info->group);
	if (group_info == NULL) return MPI_ERR_COMM;

	me = comm_info->source;
	nprocs = group_info->num_members;

	if( me != root )
	{
		/* Non-root processes just receive their chunk from the root. */
		CM_Recv(recvbuf,recvcount,recvtype,root,me,
			comm_info->coll_context,MPI_ANY_TAG,&req);
		wait_req = OOM_request_lookup(req);
		CM_Wait(wait_req,&src,&tag,&err);
		return MPI_SUCCESS;
	}

	/* Root walks its send buffer one chunk per destination rank. */
	for( i = 0, offset = 0; i < nprocs;
			++i, offset += sendcount*sizeof_base_type(sendtype) )
	{
		if( i == root )
		{
			/* Own chunk: a local copy, no message required. */
			memcpy(recvbuf,&outgoing[offset],
				sizeof_base_type(recvtype) * recvcount);
		}
		else
		{
			CM_Send(&outgoing[offset],sendcount,sendtype,
				root,i,comm_info->coll_context,
				MPI_ANY_TAG,group_info->members[i],&req);
		}
	}

	return MPI_SUCCESS;
}

/*
 * ************************************
 * \author Vijaya Dudekonda, James Edmondson
 * \brief Scatters data from a root node to all other processes in the
 *        Communicator, with a user provided displacement. Written by
 *         both authors during a Meeting of the Minds.
 ***********************************
 */

/**
 * Scatters variable-sized chunks of root's sendbuf, with per-destination
 * displacements (in units of sendtype's extent), to every process in comm.
 *
 * Fixes over the original:
 *  - pointer arithmetic is done on a char * (arithmetic on void * is a
 *    GCC extension, not ISO C);
 *  - the offset is kept in an MPI_Aint instead of int, so large
 *    displacement * extent products are not truncated;
 *  - the debug line prints pointers with %p (the original used %d,
 *    which is undefined behavior).
 */
int MPI_Scatterv(void *sendbuf, int *sendcounts, int *displs,
	MPI_Datatype sendtype, void *recvbuf, int recvcount,
	MPI_Datatype recvtype, int root, MPI_Comm comm)
{
	Comm *real_comm;
	Group *real_group = NULL;
	Request *real_request;
	MPI_Request request;
	int my_group_rank;
	int group_size;
	int ierr;
	int i = 0;
	MPI_Aint sendtype_extent;
	MPI_Aint offset;

	real_comm = OOM_comm_lookup(comm);
	if (real_comm == NULL) return MPI_ERR_COMM;

	real_group = OOM_group_lookup(real_comm->group);
	if (real_group == NULL) return MPI_ERR_COMM;

	ierr = MPI_Type_extent(sendtype, &sendtype_extent);
	if (ierr != MPI_SUCCESS) return MPI_ERR_TYPE;

	my_group_rank = real_comm->source;
	group_size = real_group->num_members;

	if( my_group_rank == root )
	{
		for( i = 0; i < group_size; ++i )
		{
			offset = (MPI_Aint)displs[i] * sendtype_extent;
			if( i != root )
			{
				CM_Send((char *)sendbuf + offset, sendcounts[i], sendtype,
					root, i, real_comm->coll_context,
					MPI_ANY_TAG, real_group->members[i], &request);
			}
			else
			{
				debug("dst: %p src: %p", recvbuf,
					(void *)((char *)sendbuf + offset));
				/* memmove: recvbuf may overlap sendbuf (e.g. when
				 * called from MPI_Reduce_scatter with the same base). */
				memmove(recvbuf, (char *)sendbuf + offset,
					sizeof_base_type(sendtype) * sendcounts[i]);
			}
		}
	}
	else
 	{
		CM_Recv(recvbuf,recvcount,recvtype,root,my_group_rank,
			real_comm->coll_context,MPI_ANY_TAG,&request);
		real_request = OOM_request_lookup(request);
		CM_Wait2(real_request, MPI_STATUS_IGNORE);
 	}
	return MPI_SUCCESS;
}

/*
 * ************************************
 * \author Vijaya Dudekonda, James Edmondson
 * \brief Gathers data from all processes in the Communicator into
 *        a single buffer, then replicates that data across all processes
 *        in that Communicator. Written by both authors during a Meeting
 *        of the Minds.
 ***********************************
 */

/**
 * Gathers equal-sized contributions from every process to rank 0, then
 * broadcasts the packed result so each process holds all contributions.
 *
 * Fixes over the original: the unused isend/irecv locals are removed,
 * and the return codes of the gather and broadcast phases are checked
 * and propagated instead of being silently discarded.
 */
int MPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
	void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
{
	Comm *real_comm;
	Group *real_group = NULL;
	int group_size;
	int ierr;

	real_comm = OOM_comm_lookup(comm);
	if (real_comm == NULL) return MPI_ERR_COMM;
	real_group = OOM_group_lookup(real_comm->group);
	if (real_group == NULL) return MPI_ERR_COMM;

	if( sendcount != recvcount ) return MPI_ERR_COUNT;
	if( sendtype != recvtype ) return MPI_ERR_TYPE;

	group_size = real_group->num_members;

	ierr = MPI_Gather(sendbuf,sendcount,sendtype,recvbuf,recvcount,
			recvtype,0,comm);
	if( ierr != MPI_SUCCESS ) return ierr;

	ierr = MPI_Bcast(recvbuf,recvcount*group_size,recvtype,0,comm);
	if( ierr != MPI_SUCCESS ) return ierr;

	return MPI_SUCCESS;
}

/**
 * Variable-count allgather: gathers recvcounts[i] elements from each
 * process i into rank 0's recvbuf at displs[i], then broadcasts the
 * assembled buffer to everyone.
 *
 * Fixes over the original:
 *  - the broadcast count was recvcounts[my_rank]*group_size, which is
 *    only correct when every process contributes the same count; the
 *    correct bound is the furthest element written, i.e.
 *    max(displs[i] + recvcounts[i]);
 *  - unused isend/irecv locals removed; phase return codes propagated.
 */
int MPI_Allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
	void *recvbuf, int *recvcounts, int *displs, MPI_Datatype recvtype,
	MPI_Comm comm)
{
	Comm *real_comm;
	Group *real_group = NULL;
	int group_size,my_group_rank;
	int total = 0;
	int i;
	int ierr;

	real_comm = OOM_comm_lookup(comm);
	if (real_comm == NULL) return MPI_ERR_COMM;
	real_group = OOM_group_lookup(real_comm->group);
	if (real_group == NULL) return MPI_ERR_COMM;

	my_group_rank = real_comm->source;
	group_size = real_group->num_members;

	if( sendcount != recvcounts[my_group_rank]) return MPI_ERR_COUNT;
	if( sendtype != recvtype ) return MPI_ERR_TYPE;

	/* The broadcast must cover the entire region the gather wrote. */
	for( i = 0; i < group_size; ++i )
		if( displs[i] + recvcounts[i] > total )
			total = displs[i] + recvcounts[i];

	ierr = MPI_Gatherv(sendbuf,sendcount,sendtype,recvbuf,recvcounts,
			displs,recvtype,0,comm);
	if( ierr != MPI_SUCCESS ) return ierr;

	ierr = MPI_Bcast(recvbuf,total,recvtype,0,comm);
	if( ierr != MPI_SUCCESS ) return ierr;

	return MPI_SUCCESS;
}

/*
 * ************************************
 * \author Vijaya Dudekonda, James Edmondson
 * \brief Scatters data from each process to a displacement in the recvbuf.
 *        Operates like a sort of matrix transpose. Written by both authors
 *        during a Meeting of the Minds.
 ***********************************
 */

/**
 * All-to-all exchange built from group_size scatter rounds: in round i,
 * process i scatters its sendbuf and every process deposits the chunk it
 * receives from i at offset i*recvcount in its recvbuf (a transpose-like
 * data motion).
 *
 * Fixes over the original: the unused my_group_rank local and a dead
 * commented-out line are removed, and each scatter's return code is
 * checked and propagated instead of being ignored.
 */
int MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
	void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
{
	Comm *real_comm;
	Group *real_group = NULL;
	int group_size;
	char * crecv = recvbuf;
	int i, cur = 0;
	int ierr;

	real_comm = OOM_comm_lookup(comm);
	if (real_comm == NULL) return MPI_ERR_COMM;
	real_group = OOM_group_lookup(real_comm->group);
	if (real_group == NULL) return MPI_ERR_COMM;

	group_size = real_group->num_members;

	for( i = 0; i < group_size;
			i++, cur += recvcount*sizeof_base_type(recvtype) )
	{
		ierr = MPI_Scatter(sendbuf, sendcount, sendtype,
		        &crecv[cur], recvcount, recvtype, i, comm);
		if( ierr != MPI_SUCCESS ) return ierr;
	}
	return MPI_SUCCESS;
}

/**
 * Variable-count all-to-all built from group_size scatterv rounds: in
 * round i, process i scatters its sendbuf (chunk j at sdispls[j]) and
 * every process deposits its incoming chunk at rdispls[i] in recvbuf.
 *
 * Fixes over the original: send_cur was computed every iteration but
 * never used (dead code, removed along with the unused my_group_rank),
 * and each scatterv's return code is now propagated.
 */
int MPI_Alltoallv(void *sendbuf, int *sendcounts, int *sdispls,
	MPI_Datatype sendtype, void *recvbuf, int *recvcounts, int *rdispls,
	MPI_Datatype recvtype, MPI_Comm comm)
{
	Comm *real_comm;
	Group *real_group = NULL;
	int group_size;
	char * csend = sendbuf;
	char * crecv = recvbuf;
	int i, recv_cur;
	int ierr;
	MPI_Aint recvtype_extent;
	MPI_Aint sendtype_extent;

	real_comm = OOM_comm_lookup(comm);
	if (real_comm == NULL) return MPI_ERR_COMM;
	real_group = OOM_group_lookup(real_comm->group);
	if (real_group == NULL) return MPI_ERR_COMM;

	/* Both extent lookups also validate the datatypes up front; the
	 * send extent itself is not needed here because MPI_Scatterv
	 * applies sdispls internally. */
	ierr = MPI_Type_extent(recvtype, &recvtype_extent);
	if (ierr != MPI_SUCCESS) return MPI_ERR_TYPE;

	ierr = MPI_Type_extent(sendtype, &sendtype_extent);
	if (ierr != MPI_SUCCESS) return MPI_ERR_TYPE;

	group_size = real_group->num_members;

	for( i = 0; i < group_size; i++)
	{
		recv_cur = rdispls[i] * recvtype_extent;
		ierr = MPI_Scatterv(csend, sendcounts, sdispls, sendtype,
		        crecv + recv_cur, recvcounts[i], recvtype, i, comm);
		if( ierr != MPI_SUCCESS ) return ierr;
	}
	return MPI_SUCCESS;
}

/*
 * ************************************
 * \author James Edmondson
 * \brief Performs a mathematical or user defined operation on data
 *        provided by each process of a Communicator.
 ***********************************
 */

/**
 * Reduces count elements of datatype from every process onto root's
 * recvbuf using op.  Non-root processes send their buffer to root; the
 * root folds each contribution in with real_op->function(in, inout,...).
 *
 * Fixes over the original:
 *  - the root seeded the result by combining sendbuf into an
 *    *uninitialized* recvbuf via the reduction operator, producing
 *    garbage; it now initializes recvbuf with memcpy of its own data;
 *  - the apr pool was created on every path and never destroyed
 *    (memory leak on each call); it is now created only on the root
 *    path that uses it and destroyed before returning.
 */
int MPI_Reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
	MPI_Op op, int root, MPI_Comm comm)
{
	Comm *real_comm = NULL;
	Group *real_group = NULL;
	Request *real_request = NULL;
	Op *real_op = NULL;
	MPI_Request request;
	int my_group_rank;
	int i = 0;
	int notused;
	int group_size;
	int ierr;
	apr_pool_t *mempool = NULL;

	real_comm = OOM_comm_lookup(comm);
	if( real_comm == NULL ) return MPI_ERR_COMM;
	real_group = OOM_group_lookup(real_comm->group);
	if( real_group == NULL ) return MPI_ERR_COMM;
	real_op = OOM_op_lookup(op);
	if( real_op == NULL ) return MPI_ERR_OP;

	my_group_rank = real_comm->source;
	group_size = real_group->num_members;

	if( my_group_rank == root )
	{
		char * buf;

		/* Seed the accumulator with the root's own contribution. */
		memcpy(recvbuf, sendbuf, count * sizeof_base_type(datatype));

		/* Scratch buffer for incoming contributions, sized for one
		 * full message. */
		apr_pool_create(&mempool, NULL);
		buf = (char *)apr_palloc(mempool,
				count * sizeof_base_type(datatype));

		for( i = 0; i < group_size; i++ )
		{
			if( i != root )
			{
				CM_Recv(buf,count,datatype,i,my_group_rank,
						real_comm->coll_context,
						MPI_ANY_TAG,&request);
				real_request = OOM_request_lookup(request);
				CM_Wait(real_request,&notused,&notused,&ierr);
				/* Fold contribution i into the running result. */
				real_op->function((void *)buf,recvbuf,&count,&datatype);
			}
		}

		apr_pool_destroy(mempool);
	}
	else
	{
		CM_Send(sendbuf,count,datatype,my_group_rank,root,
				real_comm->coll_context,MPI_ANY_TAG,
				real_group->members[root],&request);
		real_request = OOM_request_lookup(request);
		CM_Wait(real_request,&my_group_rank,&root,&ierr);
	}

	return MPI_SUCCESS;
}

/* Registers a user-defined reduction function with the object manager
 * and returns the resulting handle through *op. */
int MPI_Op_create(MPI_User_function *function, int commute, MPI_Op *op)
{
	MPI_Op handle = OOM_op_create(function, commute);

	*op = handle;
	return MPI_SUCCESS;
}

/* Releases the object-manager entry for *op, then invalidates the
 * caller's handle so it cannot be reused by accident. */
int MPI_Op_free(MPI_Op *op)
{
	OOM_op_destroy(*op);
	*op = MPI_OP_NULL;

	return MPI_SUCCESS;
}

/*
 * ************************************
 * \author Vijaya Dudekonda 
 * \brief Performs a reduction operation on data from each process
 *        in a Communicator and replicates the result of the operation
 *        to each process in that Communicator.
 ***********************************
 */

/**
 * Reduces count elements with op onto rank 0, then broadcasts the result
 * so every process ends up with the reduced value.
 *
 * Fix over the original: the return codes of both phases were discarded;
 * they are now checked and propagated to the caller.
 */
int MPI_Allreduce(void *sendbuf, void *recvbuf, int count,
	MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
	int ierr;

	ierr = MPI_Reduce(sendbuf,recvbuf,count,datatype,op,0,comm);
	if( ierr != MPI_SUCCESS ) return ierr;

	return MPI_Bcast(recvbuf,count,datatype,0,comm);
}

/*
 * ************************************
 * \author Vijaya Dudekonda 
 * \brief Performs a reduction operation on data from each process and
 * 	  scatters the result to each process in that Communicator.
 ***********************************
 */

/**
 * Reduces sum(recvcounts) elements with op onto rank 0, then scatters
 * recvcounts[i] elements of the reduced result to process i, using
 * prefix-sum displacements computed from recvcounts.
 *
 * Fixes over the original: the displs array was malloc'd but never
 * freed (leaked on every call); it is now freed on all exit paths, and
 * the return codes of the reduce and scatterv phases are propagated.
 */
int MPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
	MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
	Comm *real_comm = NULL;
	Group *real_group = NULL;
	Op *real_op = NULL;
	int my_group_rank;
	int *displs;
	int group_size;
	int count = 0, i;
	int ierr;

	real_comm = OOM_comm_lookup(comm);
	if( real_comm == NULL ) return MPI_ERR_COMM;
	real_group = OOM_group_lookup(real_comm->group);
	if( real_group == NULL ) return MPI_ERR_COMM;
	real_op = OOM_op_lookup(op);
	if( real_op == NULL ) return MPI_ERR_OP;

	my_group_rank = real_comm->source;
	group_size = real_group->num_members;

	/* NOTE(review): a failed malloc is not handled here because the
	 * project's mpi.h error-code set is not visible from this file;
	 * confirm an appropriate code (e.g. MPI_ERR_OTHER) and check. */
	displs = (int *)malloc(sizeof(int) * group_size);

	/* Total element count reduced at the root. */
	for(i = 0; i < group_size; i++)
		count = count + recvcounts[i];

	ierr = MPI_Reduce(sendbuf,recvbuf,count,datatype,op,0,comm);
	if( ierr != MPI_SUCCESS )
	{
		free(displs);
		return ierr;
	}

	/* Prefix sums of recvcounts give each rank's slice offset. */
	displs[0] = 0;
	for( i = 1; i < group_size; i++)
		displs[i] = displs[i-1] + recvcounts[i-1];

	ierr = MPI_Scatterv(recvbuf,recvcounts,displs,datatype,recvbuf,
			recvcounts[my_group_rank],datatype,0,comm);

	free(displs);

	return ierr;
}

/* NOTE(review): unimplemented stub -- it reports MPI_SUCCESS without
 * touching recvbuf, so callers silently receive no prefix-reduction
 * result.  Either implement it (e.g. receive the partial result from
 * rank-1, apply op with the local sendbuf, forward to rank+1) or return
 * an error code so callers can detect the missing functionality. */
int MPI_Scan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
	MPI_Op op, MPI_Comm comm)
{
	return MPI_SUCCESS;
}

/*
 * ************************************
 * \author Vijaya Dudekonda, James Edmondson 
 * \brief Gathers data from processes in a Communicator and places that
 *        data at user specified displacements in the root's recvbuf.
 ***********************************
 */

/**
 * Gathers recvcounts[i] elements from each process i into root's recvbuf
 * at displacement displs[i] (in units of recvtype's extent).  Non-root
 * processes send and wait; the root receives from everyone in rank order
 * and copies its own contribution locally.
 *
 * Fixes over the original:
 *  - the loop header "i++, cur = displs[i] * recvtype_extent" evaluated
 *    displs[i] AFTER the final i++ and therefore read
 *    displs[num_members] out of bounds on the last pass; the offset is
 *    now computed inside the loop body;
 *  - the unused "requests" local is removed and the mixed tab/space
 *    indentation is normalized.
 */
int MPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
	void *recvbuf, int *recvcounts, int *displs, MPI_Datatype recvtype,
	int root, MPI_Comm comm)
{
	Comm *real_comm;
	Group *real_group = NULL;
	Request *real_request;
	MPI_Request request;
	int my_group_rank;
	int tsource = root;
	int tag = 1, i = 0, cur = 0;
	int ierr;
	char * crecv = recvbuf;
	MPI_Aint recvtype_extent;

	real_comm = OOM_comm_lookup(comm);
	if (real_comm == NULL) return MPI_ERR_COMM;
	real_group = OOM_group_lookup(real_comm->group);
	if (real_group == NULL) return MPI_ERR_COMM;

	my_group_rank = real_comm->source;

	ierr = MPI_Type_extent(recvtype, &recvtype_extent);
	if (ierr != MPI_SUCCESS) return MPI_ERR_TYPE;

	if (root == my_group_rank)
	{
		for (i = 0; i < real_group->num_members; i++)
		{
			cur = displs[i] * recvtype_extent;
			if( i != root )
			{
				CM_Recv(crecv + cur,recvcounts[i],recvtype,i,root,
					real_comm->coll_context,tag,&request);
				real_request = OOM_request_lookup(request);
				CM_Wait(real_request,&tsource,&tag,&ierr);
			}
			else
			{
				/* Root's own contribution needs no message. */
				memcpy(crecv + cur,sendbuf,
						recvcounts[i]*sizeof_base_type(recvtype));
			}
		}
	}
	else
	{
		CM_Send(sendbuf,sendcount,sendtype,my_group_rank,root,
				real_comm->coll_context,1,real_group->members[root],&request);
		real_request = OOM_request_lookup(request);
		CM_Wait(real_request,&tsource,&tag,&ierr);
	}
	return MPI_SUCCESS;
}
