/*
 *  Communicator.h
 *  PX
 *
 *  Created by Patrick Zulian on 12/22/10.
 *  
 *
 */

#ifndef NSE_SOLVER_MPI_COMMUNICATOR_H
#define NSE_SOLVER_MPI_COMMUNICATOR_H

// NOTE(review): identity macro — the wrapped MPI return codes are currently
// discarded, so MPI failures go unnoticed; consider checking the error code.
#define NSE_SOLVER_CATCH_ERROR(x) x

#include <mpi.h>
#include <assert.h>

#include "Base.h"
#include "Utils.h"
#include "MPIWrap.h"




namespace nsesolver {
	namespace mpi {
		
		
		///General template mapping a C++ element type to its MPI datatype handle.
		///Only the explicit specializations below are defined: instantiating the
		///primary template for an unsupported type yields a link-time error by design.
		///NOTE: the specializations must NOT carry `static` — a storage-class
		///specifier is ill-formed on an explicit specialization ([dcl.stc]);
		///`inline` alone is correct for header-defined functions.
		template< typename T >
		MPI_Datatype GetMPIDataType();
		
		///@return MPI_DOUBLE for double buffers
		template<>
		inline MPI_Datatype GetMPIDataType< double >()
		{
			return MPI_DOUBLE;
		}
		
		///@return MPI_LONG for long buffers
		template<>
		inline MPI_Datatype GetMPIDataType< long >()
		{
			return MPI_LONG;
		}
		
		///@return MPI_FLOAT for float buffers
		template<>
		inline MPI_Datatype GetMPIDataType< float >()
		{
			return MPI_FLOAT;
		}
		
		///@return MPI_INT for int buffers
		template<>
		inline MPI_Datatype GetMPIDataType< int >()
		{
			return MPI_INT;
		}
/**
 * \brief MPI wrapper for the communication.
 *
 * Wraps an MPI_Comm handle together with the lists of pending non-blocking
 * send/receive requests posted through iSend()/iRecv(), which are completed
 * via the waitAll*() family.
 */

class Communicator {
private:
	MPI_Comm mpiComm;
	int rank, numProcs;
	std::vector< MPI_Request >  sendReqs, recvReqs; // use &sendReqs[0] to pass it to the mpi wait routine 
	
	


	
		
	
	
	
public:
	
	Communicator(MPI_Comm mpiComm = mpi::WORLD)
	: mpiComm(mpiComm), rank(0), numProcs(0), sendReqs(), recvReqs()
	{
		NSE_SOLVER_CATCH_ERROR( MPI_Comm_rank(mpiComm, &rank) );
		NSE_SOLVER_CATCH_ERROR( MPI_Comm_size(mpiComm, &numProcs) );
		
		assert(getRank() < size());
	}
	
	Communicator(const Communicator &other)
	: mpiComm(other.mpiComm), rank(other.rank), numProcs(other.numProcs), 
	  sendReqs(other.sendReqs), recvReqs(other.recvReqs)
	{ }
	
	virtual
	~Communicator() 
	{ }
	
	
	
	
	Communicator & operator = (const Communicator &other)
	{
		if (this == &other) {
			return *this;
		}
		
		NSE_SOLVER_ASSERT(!other.hasPendingRequests());
		
	
		mpiComm = other.mpiComm;
		sendReqs = other.sendReqs;
		recvReqs = other.recvReqs;
		rank = other.rank;
		numProcs = other.numProcs;

		return *this;
	}
	
	/**
	 * @param buffer the outgoing message
	 * @param size the size of the outgoing message
	 * @param to the rank of the remote process
	 * @param tag the identifier of the communication
	 */
	template< typename T >
	void iSend(T * buffer, const int size, const int to, const int tag)
	{
		sendReqs.push_back(MPI_REQUEST_NULL);
		NSE_SOLVER_CATCH_ERROR(MPI_Isend(buffer, size, GetMPIDataType< T >(), to, tag, mpiComm, &sendReqs.back()));	
	}
	
	/**
	 * @param buffer[OUT] the incoming message
	 * @param size the size of the incoming message
	 * @param to the rank of the remote process
	 * @param tag the identifier of the communication
	 */
	template< typename T >
	void iRecv(T * buffer, const int size, const int from, const int tag)
	{
		recvReqs.push_back(MPI_REQUEST_NULL);
		NSE_SOLVER_CATCH_ERROR(MPI_Irecv(buffer, size, GetMPIDataType< T >(), from, tag, mpiComm, &recvReqs.back()));
	}
	
	/**
	 * @param recvBuffer[OUT] the incoming message
	 * @param sendBuffer the outgoing message
	 * @param size the size of the incoming/outgoing message
	 * @pram from the rank of the sending remote process
	 * @param to the rank of the receiving remote process
	 */
	template< typename T >
	void iSendRecv(T * recvBuffer, T * sendBuffer, const int size, const int with)
	{		
		NSE_SOLVER_CATCH_ERROR(iSend(sendBuffer, size, with, with));
		NSE_SOLVER_CATCH_ERROR(iRecv(recvBuffer, size, with, rank));
	}
    
    
    template< typename T >
	void sendRecv(T * recvBuffer, T * sendBuffer, const int size, const int with)
	{		
        //MPI_SENDRECV(SENDBUF, SENDCOUNT, SENDTYPE, DEST, SENDTAG, RECVBUF, RECVCOUNT, RECVTYPE, SOURCE, RECVTAG, COMM, STATUS, IERROR)
		NSE_SOLVER_CATCH_ERROR(MPI_Sendrecv(sendBuffer, size, GetMPIDataType< T >(), with, with, recvBuffer, size, GetMPIDataType< T >(), with, rank, mpiComm, MPI_STATUS_IGNORE));
	}
	
	///wait for all the communication to be finished
	inline
	void waitAll()
	{
		waitAllRecv();
		waitAllSend();
	}
	
	///@return true if not all the requests are completed
	inline
	bool hasPendingRequests() const
	{
		return !recvReqs.empty() || !sendReqs.empty();
	}
	
	///Waits for all the incoming communication to be finished
	void waitAllRecv()
	{
		if (recvReqs.size() == 1) {
			int flag;
			NSE_SOLVER_CATCH_ERROR(MPI_Test(&recvReqs[0], &flag, MPI_STATUS_IGNORE));
			if(!flag) {
				NSE_SOLVER_CATCH_ERROR(MPI_Wait(&recvReqs[0], MPI_STATUS_IGNORE));
			}
		} else {
			NSE_SOLVER_CATCH_ERROR(MPI_Waitall(recvReqs.size(), &recvReqs[0], MPI_STATUSES_IGNORE));
		}
		
		recvReqs.clear();
	}
	
	//Waits for all the outgoing communication to be finished
	void waitAllSend()
	{
		if (sendReqs.size() == 1) {
			int flag;
			NSE_SOLVER_CATCH_ERROR(MPI_Test(&sendReqs[0], &flag, MPI_STATUS_IGNORE));
			
			if(!flag) {
				NSE_SOLVER_CATCH_ERROR(MPI_Wait(&sendReqs[0], MPI_STATUS_IGNORE));
			}
		} else {
			
			NSE_SOLVER_CATCH_ERROR(MPI_Waitall(sendReqs.size(), &sendReqs[0], MPI_STATUSES_IGNORE));
		}
		
		sendReqs.clear();
	}
	
	/**
	 * @param sendBuffer the data sent to all the other processes of this communicator
	 * @param recvBuffer the data received from all the other processes of this communicator
	 */
	template< typename T >
	void allGather(T * sendBuffer, T * recvBuffer, const int size) const
	{
		NSE_SOLVER_CATCH_ERROR(MPI_Allgather(sendBuffer, size, GetMPIDataType< T >(), recvBuffer, size, GetMPIDataType< T >(), mpiComm));		
	}
	
	
	/**
	 * @param the param[IN,OUT] and result of the global reduction
	 * @param op the associative operation performed
	 */
	template< typename T >
	void allReduce(T * val, const int size, MPI_Op op) const
	{
		NSE_SOLVER_CATCH_ERROR(MPI_Allreduce(MPI_IN_PLACE, val, size, GetMPIDataType< T >(), op, mpiComm));
	}
	
	///@return true if the communicator has size 1
	inline
	bool isAlone() const
	{
		return size() == 1;
	}
	
	///@return the rank of this process in this communicator
	inline
	int getRank() const 
	{
		return rank;
	}
	
	///@return the size of the communicator
	inline
	int size() const
	{
		return numProcs;
	}
	
	///@return true if the rank of this process in this communicator is 0
	inline 
	bool isRoot() const
	{
		return rank == 0;
	}
	
	///@return the wrapped mpi communicator object
	inline
	MPI_Comm getMPIComm() const
	{
		return mpiComm;
	}
	
    inline
	void setMPIComm(MPI_Comm mpiComm)
	{
		this->mpiComm = mpiComm;
        NSE_SOLVER_CATCH_ERROR( MPI_Comm_rank(mpiComm, &rank) );
		NSE_SOLVER_CATCH_ERROR( MPI_Comm_size(mpiComm, &numProcs) );
		
	}
	
    
	/**
	 *	@param source the rank of the process for which we test if a send has been posted
	 *	@return the count of the data associated with size T
	 */
	template< typename T >
	int iProbe(int source)
	{
		int flag;
		MPI_Status status;
		NSE_SOLVER_CATCH_ERROR( MPI_Iprobe( source, rank, mpiComm, &flag, &status) );
		
		if(!flag)
			return -1;
		
		int count;
		MPI_Get_count(&status, GetMPIDataType< T >(), &count);
		
		return count;
		
	}
	
	void barrier()
	{
		MPI_Barrier(mpiComm);
	}

	
	/**
	 *	@tparam T the datatype which is expected from the message
	 *	@param source[OUT] the rank of the process that posted a message send
	 *	@return the count of the data associated with size T
	 */
	template< typename T >
	int iProbeAny(int * source)
	{
		*source = MPI_PROC_NULL;
		
		int flag;
		MPI_Status status;
		NSE_SOLVER_CATCH_ERROR( MPI_Iprobe( MPI_ANY_SOURCE, rank, mpiComm, &flag, &status) );
		
		if(!flag)
			return -1;
		
		*source = status.MPI_SOURCE;
		
		int count;
		MPI_Get_count(&status, GetMPIDataType< T >(), &count);
		
		return count;
		
	}
	
	/**
	 * @return the rank of the process associated with the complete message transfer or -1
	 */
	inline int testRecvAny()
	{
		int index, flag;
		MPI_Status status;
		
		NSE_SOLVER_CATCH_ERROR( MPI_Testany(recvReqs.size(), &recvReqs[0], &index, &flag, &status) );
		
		if (!flag) {
			return MPI_PROC_NULL;
		}
		
		return status.MPI_SOURCE;
	}
	
	
	
	
	inline bool isValid(int rank)
	{
		return (rank >= 0) && (rank < size());
 	}
	
	
	static inline bool isNull(int rank)
	{
		return rank == MPI_PROC_NULL;
	}
	
	
	//Output
	friend
	std::ostream & operator <<(std::ostream &os, const Communicator &comm)
	{
		os << "[" << comm.getRank() << "] ";
		return os;
	}
	
};


}
	
}


#endif
