/*************************************************************************
 *                                                                       *
 * This file is part of the DogosFrame distribution.  For full copyright *
 * information, see COPYRIGHT and COPYING.LESSER.                        *
 *                                                                       *
 * Author:           sqbang                                              *
 * Email:            sqbang@ustc.edu                                     *
 *                                                                       *
 *************************************************************************/

#ifndef DOGOS_include_COMMON_para_Parallel_h
#define DOGOS_include_COMMON_para_Parallel_h

#include "DogosConfig.h"

//C++ headers
#include <vector>
#include <map>
#include <set>

//COMMON headers
#include "COMMON/base/Common.h"
#include "COMMON/math/TypeVector.h"
#include "COMMON/para/DOGOS_MPI.h"

namespace DOGOS
{

namespace Parallel
{

#ifdef DOGOS_HAVE_MPI

/**
 * Forward declarations of classes we will define later.
 */
class DataType;
class Request;
class Status;

/**
 * Data types for communication.  Raw MPI handle; see the DataType
 * wrapper class below for a value-type interface.
 */
typedef MPI_Datatype data_type;

/**
 * Request object for non-blocking I/O.  Raw MPI handle; see the
 * Request wrapper class below.
 */
typedef MPI_Request request;

/**
 * Status object for querying messages.  Raw MPI struct; see the
 * Status wrapper class below.
 */
typedef MPI_Status status;

/**
 * Templated function to return the appropriate MPI datatype
 * for use with built-in C types.  Only explicit specializations are
 * provided (presumably in Parallel.inl); instantiating this for an
 * unsupported T is a link-time error.
 */
template <typename T>
data_type datatype();

/**
 * Default message tag id: matches any incoming tag.
 */
const int any_tag = MPI_ANY_TAG;

/**
 * Accept from any source rank.
 */
const int any_source = MPI_ANY_SOURCE;

/**
 * Encapsulates the MPI_Datatype.
 */
class DataType
{
public:

    DataType()
    {}

    DataType (
        const DataType& rhs) :
        d_datatype (rhs.d_datatype)
    {}

    DataType (
        const data_type& type) :
        d_datatype (type)
    {}

    DataType&
    operator = (
        const DataType& rhs)
    {
        d_datatype = rhs.d_datatype;
        return *this;
    }

    DataType&
    operator = (
        const data_type& type)
    {
        d_datatype = type;
        return *this;
    }

    operator const data_type& () const
    {
        return d_datatype;
    }

    operator data_type& ()
    {
        return d_datatype;
    }

    void
    commit()
    {
        MPI_Type_commit (&d_datatype);
    }

    void
    free()
    {
        MPI_Type_free (&d_datatype);
    }

private:

    data_type d_datatype;
};



/**
 * Encapsulates the MPI_Status struct.  Allows the source and size
 * of the message to be determined.
 */
class Status
{
public:

    Status()
    {}

    Status (
        const data_type& type) :
        d_datatype (type)
    {}

    Status (
        const status& status_v) :
        d_status (status_v)
    {}

    Status (
        const status&    status_v,
        const data_type& type) :
        d_status (status_v),
        d_datatype (type)
    {}

    Status (
        const Status& status_v) :
        d_status (status_v.d_status),
        d_datatype (status_v.d_datatype)
    {}

    Status (
        const Status&    status_v,
        const data_type& type) :
        d_status (status_v.d_status),
        d_datatype (type)
    {}

    operator status* ()
    {
        return &d_status;
    }

    operator status const* () const
    {
        return &d_status;
    }

    int
    source() const
    {
        return d_status.MPI_SOURCE;
    }

    int
    tag() const
    {
        return d_status.MPI_TAG;
    }

    data_type&
    datatype()
    {
        return d_datatype;
    }

    const data_type&
    datatype() const
    {
        return d_datatype;
    }

    unsigned int
    size (
        const data_type& type) const
    {
        int msg_size;
        MPI_Get_count (const_cast<MPI_Status*>(&d_status), type, &msg_size);
        TBOX_ASSERT (msg_size >= 0);
        return msg_size;
    }

    unsigned int
    size() const
    {
        return this->size (this->datatype());
    }

private:

    status    d_status;
    data_type d_datatype;
};

/**
 * Encapsulates the MPI_Request
 */
/**
 * Encapsulates the MPI_Request.
 *
 * NOTE(review): this class has a destructor that frees the request and
 * a copy-assignment operator that copies the raw handle, but no
 * user-declared copy constructor (Rule of Three).  The implicitly
 * generated copy constructor also copies the raw handle, so two
 * Request objects can end up owning the same non-null MPI_Request and
 * both destructors would call MPI_Request_free on it — verify whether
 * callers ever copy a Request holding an active request (e.g. via
 * std::vector<Request> growth).  Similarly, assigning over an object
 * that already holds a non-null request abandons the old handle
 * without freeing it.  Left unchanged here because altering copy
 * semantics could break existing callers.
 */
class Request
{
public:

    /**
     * Default constructor.  Starts in the "no pending operation"
     * state (MPI_REQUEST_NULL).
     */
    Request()
    {
        d_request = MPI_REQUEST_NULL;
    }

    /**
     * Construct from a raw MPI request handle; this object takes
     * responsibility for freeing it.
     */
    Request (
        const request& r) :
        d_request (r)
    {}

    /**
     * Copy assignment.  Copies the raw handle only — see the class
     * note about ownership of the previous and the copied handle.
     */
    Request&
    operator = (
        const Request& rhs)
    {
        d_request = rhs.d_request;
        return *this;
    }

    /**
     * Assign from a raw MPI request handle — see the class note about
     * ownership of the previously held handle.
     */
    Request&
    operator = (
        const request& r)
    {
        d_request = r;
        return *this;
    }

    ~Request()
    {
        // explicitly free this request if not
        // done so already, otherwise this would
        // be a memory leak!
        if (d_request != MPI_REQUEST_NULL)
            MPI_Request_free (&d_request);
    }

    /**
     * Implicit conversion to the underlying raw handle (read-only).
     */
    operator const request& () const
    {
        return d_request;
    }

    /**
     * Implicit conversion to the underlying raw handle (mutable), for
     * passing directly to MPI routines.
     */
    operator request& ()
    {
        return d_request;
    }

    /**
     * Block until the pending operation completes (wraps MPI_Wait).
     * On completion MPI resets the handle to MPI_REQUEST_NULL.
     */
    status
    wait()
    {
        status status_value;
        MPI_Wait (&d_request, &status_value);
        return status_value;
    }

    /**
     * Non-blocking completion check (wraps MPI_Test).  Returns true if
     * the operation has completed; the status is discarded.
     */
    bool
    test()
    {
        int val = 0;

        MPI_Test (
            &d_request,
            &val,
            MPI_STATUS_IGNORE);

        if (val)
        {
            // MPI_Test resets the handle on completion.
            TBOX_ASSERT (d_request == MPI_REQUEST_NULL);
            TBOX_ASSERT (val == 1);
        }

        return val;
    }

    /**
     * Non-blocking completion check that also returns the message
     * status in \p status_value.
     */
    bool
    test (
        status& status_value)
    {
        int val = 0;

        MPI_Test (
            &d_request,
            &val,
            &status_value);

        return val;
    }

private:

    request d_request;
};

#else

// Serial (non-MPI) build: all handle types collapse to plain ints so
// the declarations below still compile; the .inl implementations
// presumably reduce to no-ops or local operations in this case.
typedef int data_type;
typedef int DataType;
typedef int request;
typedef int Request;
typedef int status;
typedef int Status;

// Sentinel values standing in for MPI_ANY_TAG / MPI_ANY_SOURCE.
const int any_tag = -1;
const int any_source = -1;

#endif

/**
 * Pause execution until all processors reach a certain point.
 */
inline void
barrier();

/**
 * Verify that a local variable has the same value on all processors.
 */
template <typename T>
inline bool
verify (
    const T& r);

/**
 * Take a local variable and replace it with the minimum of its values
 * on all processors
 */
template <typename T>
inline void
min (
    T& r);

/**
 * Take a vector of local variables and replace each entry with the minimum
 * of its values on all processors
 */
template <typename T>
inline void
min (
    std::vector<T>& r);

/**
 * Take a local variable and replace it with the maximum of its values
 * on all processors
 */
template <typename T>
inline void
max(
    T& r);

/**
 * Take a vector of local variables and replace each entry with the maximum
 * of its values on all processors
 */
template <typename T>
inline void
max(
    std::vector<T>& r);

/**
 * Take a local variable and replace it with the sum of its values
 * on all processors
 */
template <typename T>
inline void
sum(
    T& r);

/**
 * Take a vector of local variables and replace each entry with the sum of
 * its values on all processors
 */
template <typename T>
inline void
sum(
    std::vector<T>& r);

/**
 * Blocking message probe.  Allows information about a message to be
 * examined before the message is actually received.
 */
inline status
probe (
    const int src_processor_id,
    const int tag = any_tag);

/**
 * Blocking-send vector to one processor with user-defined type.
 */
template <typename T>
inline void
send (
    const int dest_processor_id,
    std::vector<T>& buf,
    const DataType& type,
    const int tag = 0);

/**
 * Nonblocking-send vector to one processor with user-defined type.
 */
template <typename T>
inline void
send (
    const int dest_processor_id,
    std::vector<T>& buf,
    const DataType& type,
    request& req,
    const int tag = 0);

/**
 * Blocking-send vector to one processor where the communication type
 * is inferred from the template argument.
 */
template <typename T>
inline void
send (
    const int dest_processor_id,
    std::vector<T>& buf,
    const int tag = 0);

// Function overloading for std::complex<>
template <typename T>
inline void
send (
    const int dest_processor_id,
    std::vector<std::complex<T> >& buf,
    const int tag = 0);

/**
 * Nonblocking-send vector to one processor where the communication type
 * is inferred from the template argument.
 */
template <typename T>
inline void
send (
    const int dest_processor_id,
    std::vector<T>& buf,
    request& req,
    const int tag = 0);

// Function overloading for std::complex<>
template <typename T>
inline void
send (
    const int dest_processor_id,
    std::vector<std::complex<T> >& buf,
    request& req,
    const int tag = 0);

/**
 * Nonblocking-send vector to one processor with user-defined type.
 */
template <typename T>
inline void
nonblocking_send (
    const int dest_processor_id,
    std::vector<T>& buf,
    const DataType& type,
    request& r,
    const int tag = 0);

/**
 * Nonblocking-send vector to one processor.
 */
template <typename T>
inline void
nonblocking_send (
    const int dest_processor_id,
    std::vector<T>& buf,
    request& r,
    const int tag = 0);

// Function overloading for std::complex<>
template <typename T>
inline void
nonblocking_send (
    const int dest_processor_id,
    std::vector<std::complex<T> >& buf,
    request& r,
    const int tag = 0);

/**
 * Blocking-receive vector from one processor with user-defined type.
 */
template <typename T>
inline Status
receive (
    const int src_processor_id,
    std::vector<T>& buf,
    const DataType& type,
    const int tag = any_tag);

/**
 * Nonblocking-receive vector from one processor with user-defined type.
 */
template <typename T>
inline void
receive (
    const int src_processor_id,
    std::vector<T>& buf,
    const DataType& type,
    request& req,
    const int tag = any_tag);

/**
 * Blocking-receive vector from one processor where the communication type
 * is inferred from the template argument.
 */
template <typename T>
inline Status
receive (
    const int src_processor_id,
    std::vector<T>& buf,
    const int tag = any_tag);

// Function overloading for std::complex<>
template <typename T>
inline Status
receive (
    const int src_processor_id,
    std::vector<std::complex<T> >& buf,
    const int tag = any_tag);

/**
 * Nonblocking-receive vector from one processor where the communication type
 * is inferred from the template argument.
 */
template <typename T>
inline void
receive (
    const int src_processor_id,
    std::vector<T>& buf,
    request& req,
    const int tag = any_tag);

// Function overloading for std::complex<>
template <typename T>
inline void
receive (
    const int src_processor_id,
    std::vector<std::complex<T> >& buf,
    request& req,
    const int tag = any_tag);

/**
 * Nonblocking-receive vector from one processor with user-defined type
 */
template <typename T>
inline void
nonblocking_receive (
    const int src_processor_id,
    std::vector<T>& buf,
    const DataType& type,
    request& r,
    const int tag = any_tag);

/**
 * Nonblocking-receive vector from one processor.
 */
template <typename T>
inline void
nonblocking_receive (
    const int src_processor_id,
    std::vector<T>& buf,
    request& r,
    const int tag = any_tag);

// Function overloading for std::complex<>
template <typename T>
inline void
nonblocking_receive (
    const int src_processor_id,
    std::vector<std::complex<T> >& buf,
    request& r,
    const int tag = any_tag);

/**
 * Wait for a single non-blocking send or receive to finish.
 */
inline status
wait (
    request& r);

/**
 * Wait for a collection of non-blocking sends or receives (raw
 * handles) to finish.
 */
inline void
wait (
    std::vector<request>& r);

/**
 * Wait for a collection of non-blocking sends or receives (wrapped
 * handles) to finish.
 */
inline void
wait (
    std::vector<Request>& r);

/**
 * Send vector \p send to one processor while simultaneously receiving
 * another vector \p recv from a (potentially different) processor.
 */
template <typename T>
inline void
send_receive (
    const int dest_processor_id,
    T& send,
    const int source_processor_id,
    T& recv);

/**
 * Send vector \p send to one processor while simultaneously receiving
 * another vector \p recv from a (potentially different) processor using
 * a user-specified MPI Datatype.
 */
template <typename T>
inline void
send_receive (
    const int dest_processor_id,
    T& send,
    const int source_processor_id,
    T& recv,
    const DataType& type);

/**
 * Take a vector of length n_processors, and on processor root_id fill in
 * recv[processor_id] = the value of send on processor processor_id
 */
template <typename T>
inline void
gather(
    const int root_id,
    T send,
    std::vector<T>& recv);

/**
 * This function provides a convenient method
 * for combining vectors from each processor into one
 * contiguous chunk on one processor.  This handles the
 * case where the lengths of the vectors may vary.
 * Specifically, this function transforms this:
 \verbatim
  Processor 0: [ ... N_0 ]
  Processor 1: [ ....... N_1 ]
    ...
  Processor M: [ .. N_M]
 \endverbatim
 *
 * into this:
 *
 \verbatim
 [ [ ... N_0 ] [ ....... N_1 ] ... [ .. N_M] ]
 \endverbatim
 *
 * on processor root_id. This function is collective and therefore
 * must be called by all processors.
 */
template <typename T>
inline void
gather(
    const int root_id,
    std::vector<T>& r);

/**
 * Gather a distributed set onto processor root_id.
 */
template <typename T>
inline void
gather(
    const int root_id,
    std::set<T>& r);

/**
 * Gather a distributed map onto processor root_id.
 */
template <typename T1, typename T2>
inline void
gather(
    const int root_id,
    std::map<T1, T2>& map);

/**
 * Take a vector of length n_processors, and fill in
 * \p recv[processor_id] = the value of \p send on that processor
 */
template <typename T>
inline void
allgather(
    T send,
    std::vector<T>& recv);

/**
 * Take a vector of local variables and expand it to include
 * values from all processors. By default, each processor is
 * allowed to have its own unique input buffer length. If
 * it is known that all processors have the same input sizes
 * additional communication can be avoided.
 */
template <typename T>
inline void
allgather(
    std::vector<T>& r,
    const bool identical_buffer_sizes = false);

/**
 * Take a set of local variables and expand it to include
 * values from all processors
 */
template <typename T>
inline void
allgather(
    std::set<T>& r);

/**
 * Allgather a distributed map.
 */
template <typename T1, typename T2>
inline void
allgather(
    std::map<T1, T2>& map);

/**
 * Allgather a vector whose elements are maps.
 */
template <typename T1, typename T2>
inline void
allgather(
    std::vector<std::map<T1, T2> >& r);

/**
 * Effectively transposes the input vector across all processors.
 * The jth entry on processor i is replaced with the ith entry
 * from processor j.
 */
template <typename T>
inline void
alltoall(
    std::vector<T>& r);

/**
 * Take a local value and broadcast it to all processors.
 * Optionally takes the \p root_id processor, which specifies
 * the processor initiating the broadcast.
 */
template <typename T>
inline void
broadcast (
    T& data,
    const int root_id);

/**
 * @brief MPI Bcast (does nothing when MPI is disabled).
 */
template <typename T>
inline void
broadcast (
    T* data,
    int count,
    int root_id);

/**
 * Take a local vector and broadcast it to all processors.
 * Optionally takes the \p root_id processor, which specifies
 * the processor initiating the broadcast.  The user is responsible
 * for appropriately sizing the input buffer on all processors.
 */
template <typename T>
inline void
broadcast (
    std::vector<T>& data,
    const int root_id);

} //namespace Parallel

}//namespace DOGOS

#include "COMMON/para/Parallel.inl"

#endif //DOGOS_include_COMMON_para_Parallel_h
