/*************************************************************************
 *                                                                       *
 * This file is part of the DogosFrame distribution.  For full copyright *
 * information, see COPYRIGHT and COPYING.LESSER.                        *
 *                                                                       *
 * Author:           sqbang                                              *
 * Email:            sqbang@ustc.edu                                     *
 *                                                                       *
 *************************************************************************/

namespace DOGOS
{

namespace Parallel
{

/*
 *************************************************************************
 *                                                                       *
 * Internal helper function to create vector<something_usable> from     *
 * vector<bool> for compatibility with MPI bitwise operations            *
 *                                                                       *
 *************************************************************************
 */
#ifdef DOGOS_HAVE_MPI
template <typename T>
inline void
packVectorBool(
    const std::vector<bool> &in,
    std::vector<T> &out)
{
    // Pack each bool of 'in' into one bit of 'out' (little-endian bit
    // order within each word of type T), for use with MPI bitwise
    // reduction operations (MPI_BAND/MPI_BOR).
    //
    // Bug fix: the bit to set must be computed in type T.  The original
    // shifted an int ('(in[i]?1:0) << offset'), which is undefined
    // behavior for offsets >= the bit width of int, i.e. whenever T is
    // wider than int (e.g. T = unsigned long long).
    unsigned int data_bits = 8*static_cast<unsigned int>(sizeof(T));
    unsigned int in_size   = static_cast<unsigned int>(in.size());
    // Round the word count up so a partial trailing word is included.
    unsigned int out_size  = in_size/data_bits + (in_size%data_bits?1:0);
    out.clear();
    out.resize (out_size);   // value-initialized: all bits start at 0
    for (unsigned int i=0; i != in_size; ++i)
    {
        if (in[i])
        {
            unsigned int index  = i/data_bits;
            unsigned int offset = i%data_bits;
            out[index] |= static_cast<T>(static_cast<T>(1) << offset);
        }
    }
}


/*
 *************************************************************************
 *                                                                       *
 * Internal helper function to recreate vector<bool> from a packed      *
 * vector<something_usable> for compatibility with MPI byte operations   *
 *                                                                       *
 *************************************************************************
 */

template <typename T>
inline void
unpackVectorBool(
    const std::vector<T> &in,
    std::vector<bool> &out)
{
    // Inverse of packVectorBool: scatter the packed bits of 'in' back
    // into the vector<bool> 'out'.  The caller must have sized 'out'
    // already, since 'in' only encodes the number of whole words, not
    // the exact bit count.
    const unsigned int data_bits = 8*static_cast<unsigned int>(sizeof(T));
    const unsigned int out_size = static_cast<unsigned int>(out.size());
    TBOX_ASSERT (out_size/data_bits + (out_size%data_bits?1:0) == in.size());

    for (unsigned int bit = 0; bit != out_size; ++bit)
    {
        const unsigned int word  = bit/data_bits;
        const unsigned int shift = bit%data_bits;
        // Shift the target bit to the top position, then back down,
        // leaving exactly that bit's value (0 or 1).
        out[bit] = in[word] << (data_bits-1-shift) >> (data_bits-1);
    }
}


/*
 * Explicit specializations of datatype<T>() mapping each built-in C++
 * arithmetic type to the corresponding MPI datatype constant.  These are
 * used throughout this file to select the MPI datatype matching a
 * template argument T.
 */
template<>
inline data_type
datatype<char>()
{
    return MPI_CHAR;
}


template<>
inline data_type
datatype<unsigned char>() {
    return MPI_UNSIGNED_CHAR;
}


template<>
inline data_type
datatype<short int>() {
    return MPI_SHORT;
}


template<>
inline data_type
datatype<unsigned short int>() {
    return MPI_UNSIGNED_SHORT;
}


template<>
inline data_type
datatype<int>() {
    return MPI_INT;
}


template<>
inline data_type
datatype<unsigned int>() {
    return MPI_UNSIGNED;
}


template<>
inline data_type
datatype<long>() {
    return MPI_LONG;
}


template<>
inline data_type
datatype<unsigned long>() {
    return MPI_UNSIGNED_LONG;
}


template<>
inline data_type
datatype<float>() {
    return MPI_FLOAT;
}


template<>
inline data_type
datatype<double>() {
    return MPI_DOUBLE;
}


template<>
inline data_type
datatype<long double>() {
    return MPI_LONG_DOUBLE;
}
#endif


// Block until every rank of the world communicator has reached this
// call.  A no-op in builds without MPI.
inline void
barrier()
{
#ifdef DOGOS_HAVE_MPI
    MPI_Barrier (DOGOS_MPI::get_MPI_world().get_communicator());
#else
    /*
     * do nothing
     */
#endif
}


// Check that the value r is identical on every rank.  Returns true on
// all ranks iff every rank holds the same value (always true in serial
// runs or builds without MPI).  T must support operator== and have a
// datatype<T>() specialization.
template <typename T>
inline bool
verify (
    const T &r)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() > 1)
    {
        // r is globally consistent iff it equals both the global
        // minimum and the global maximum.
        T global_min = r;
        T global_max = r;
        Parallel::min (global_min);
        Parallel::max (global_max);
        bool consistent = (r == global_min) && (r == global_max);
        // Reduce so every rank agrees on the verdict.
        Parallel::min (consistent);
        return consistent;
    }
    return true;
#else
    (void)r;
    return true;
#endif
}


// Specialization of verify for std::string: checks the string is
// identical on every rank.  The characters are widened to short int
// because MPI_MIN/MPI_MAX are not strictly defined for char types.
template <>
inline bool
verify (
    const std::string & r)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() > 1)
    {
        // Cannot use <char> since MPI_MIN is not
        // strictly defined for chars!  Widen each character instead.
        std::vector<short int> widened (r.begin(), r.end());
        return Parallel::verify (widened);
    }
    return true;
#else
    (void)r;
    return true;
#endif
}


// Replace r on every rank with the global minimum of r over all ranks.
// No-op in serial runs or builds without MPI.  T must have a
// datatype<T>() specialization.
template <typename T>
inline void
min (
    T &r)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() > 1)
    {
        // Reduce into a scratch variable, then copy back; MPI forbids
        // aliased send/receive buffers.
        T reduced;
        MPI_Allreduce (&r,
                       &reduced,
                       1,
                       datatype<T>(),
                       MPI_MIN,
                       DOGOS_MPI::get_MPI_world().get_communicator());
        r = reduced;
    }
#else
    (void)r;
#endif
}


// Specialization of min for bool: widens to unsigned int for the
// reduction, since bool has no portable MPI datatype mapping here.
template <>
inline void
min (
    bool &r)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() > 1)
    {
        // Separate send/receive buffers: MPI forbids aliasing.
        unsigned int tempsend = r;
        unsigned int temp;
        MPI_Allreduce (&tempsend,
                       &temp,
                       1,
                       datatype<unsigned int>(),
                       MPI_MIN,
                       DOGOS_MPI::get_MPI_world().get_communicator());
        r = temp;
    }
#else
    (void)r;
#endif
}


// Elementwise global minimum of a vector across all ranks, in place.
// Every rank must pass a vector of the same length.  No-op in serial
// runs, builds without MPI, or for empty vectors.
template <typename T>
inline void
min (
    std::vector<T> &r)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() > 1 && !r.empty())
    {
        // Receive into a scratch vector and swap it in; MPI forbids
        // aliased send/receive buffers.
        std::vector<T> reduced(r.size());
        MPI_Allreduce (&r[0],
                       &reduced[0],
                       static_cast<int>(r.size()),
                       datatype<T>(),
                       MPI_MIN,
                       DOGOS_MPI::get_MPI_world().get_communicator());
        r.swap (reduced);
    }
#else
    (void)r;
#endif
}


// Specialization of min for vector<bool>: packs the bools into bits and
// uses a bitwise AND reduction (AND of {0,1} == min), which cuts the
// message size by a factor of the word width.
template <>
inline void
min (
    std::vector<bool> &r)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() > 1 && !r.empty())
    {
        std::vector<unsigned int> ruint;
        packVectorBool (r, ruint);
        std::vector<unsigned int> temp(ruint.size());
        MPI_Allreduce (&ruint[0],
                       &temp[0],
                       static_cast<int>(ruint.size()),
                       datatype<unsigned int>(),
                       MPI_BAND,
                       DOGOS_MPI::get_MPI_world().get_communicator());
        unpackVectorBool (temp, r);
    }
#else
    (void)r;
#endif
}


// Replace r on every rank with the global maximum of r over all ranks.
// No-op in serial runs or builds without MPI.  T must have a
// datatype<T>() specialization.
template <typename T>
inline void
max(
    T &r)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() > 1)
    {
        // Send from a copy and receive directly into r; MPI forbids
        // aliased send/receive buffers.
        T sendval = r;
        MPI_Allreduce (&sendval,
                       &r,
                       1,
                       datatype<T>(),
                       MPI_MAX,
                       DOGOS_MPI::get_MPI_world().get_communicator());
    }
#else
    (void)r;
#endif
}


// Specialization of max for bool: widens to unsigned int for the
// reduction, since bool has no portable MPI datatype mapping here.
template <>
inline void
max(
    bool &r)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() > 1)
    {
        // Separate send/receive buffers: MPI forbids aliasing.
        unsigned int tempsend = r;
        unsigned int temp;
        MPI_Allreduce (&tempsend,
                       &temp,
                       1,
                       datatype<unsigned int>(),
                       MPI_MAX,
                       DOGOS_MPI::get_MPI_world().get_communicator());
        r = temp;
    }
#else
    (void)r;
#endif
}


// Elementwise global maximum of a vector across all ranks, in place.
// Every rank must pass a vector of the same length.  No-op in serial
// runs, builds without MPI, or for empty vectors.
template <typename T>
inline void
max(
    std::vector<T> &r)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() > 1 && !r.empty())
    {
        // Receive into a scratch vector and swap it in; MPI forbids
        // aliased send/receive buffers.
        std::vector<T> reduced(r.size());
        MPI_Allreduce (&r[0],
                       &reduced[0],
                       static_cast<int>(r.size()),
                       datatype<T>(),
                       MPI_MAX,
                       DOGOS_MPI::get_MPI_world().get_communicator());
        r.swap (reduced);
    }
#else
    (void)r;
#endif
}


// Specialization of max for vector<bool>: packs the bools into bits and
// uses a bitwise OR reduction (OR of {0,1} == max), which cuts the
// message size by a factor of the word width.
template <>
inline void
max(
    std::vector<bool> &r)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() > 1 && !r.empty())
    {
        std::vector<unsigned int> ruint;
        packVectorBool (r, ruint);
        std::vector<unsigned int> temp(ruint.size());
        MPI_Allreduce (&ruint[0],
                       &temp[0],
                       static_cast<int>(ruint.size()),
                       datatype<unsigned int>(),
                       MPI_BOR,
                       DOGOS_MPI::get_MPI_world().get_communicator());
        unpackVectorBool (temp, r);
    }
#else
    (void)r;
#endif
}


// Replace r on every rank with the sum of r over all ranks.  No-op in
// serial runs or builds without MPI.  T must have a datatype<T>()
// specialization.
template <typename T>
inline void
sum(
    T &r)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() > 1)
    {
        // Reduce into a scratch variable, then copy back; MPI forbids
        // aliased send/receive buffers.
        T total;
        MPI_Allreduce (&r,
                       &total,
                       1,
                       datatype<T>(),
                       MPI_SUM,
                       DOGOS_MPI::get_MPI_world().get_communicator());
        r = total;
    }
#else
    (void)r;
#endif
}


// Elementwise sum of a vector across all ranks, in place.  Every rank
// must pass a vector of the same length.  No-op in serial runs, builds
// without MPI, or for empty vectors.
template <typename T>
inline void
sum(
    std::vector<T> &r)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() > 1 && !r.empty())
    {
        // Receive into a scratch vector and swap it in; MPI forbids
        // aliased send/receive buffers.
        std::vector<T> totals(r.size());
        MPI_Allreduce (&r[0],
                       &totals[0],
                       static_cast<int>(r.size()),
                       datatype<T>(),
                       MPI_SUM,
                       DOGOS_MPI::get_MPI_world().get_communicator());
        r.swap (totals);
    }
#else
    (void)r;
#endif
}


// Sum a std::complex value across all ranks, in place.  The complex is
// reduced as two contiguous values of type T (count 2), so no separate
// MPI complex datatype is needed.
template <typename T>
inline void
sum(
    std::complex<T> &r)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() > 1)
    {
        // Reduce into a scratch value, then copy back; MPI forbids
        // aliased send/receive buffers.
        std::complex<T> total;
        MPI_Allreduce (&r,
                       &total,
                       2,
                       datatype<T>(),
                       MPI_SUM,
                       DOGOS_MPI::get_MPI_world().get_communicator());
        r = total;
    }
#else
    (void)r;
#endif
}


// Elementwise sum of a vector of std::complex across all ranks, in
// place.  Each complex contributes two values of type T, hence the
// count of 2 * size().  Every rank must pass the same length; no-op in
// serial runs, builds without MPI, or for empty vectors.
template <typename T>
inline void
sum(
    std::vector<std::complex<T> > &r)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() > 1 && !r.empty())
    {
        // Receive into a scratch vector and swap it in; MPI forbids
        // aliased send/receive buffers.
        std::vector<std::complex<T> > totals(r.size());
        MPI_Allreduce (&r[0],
                       &totals[0],
                       static_cast<int>(r.size()) * 2,
                       datatype<T>(),
                       MPI_SUM,
                       DOGOS_MPI::get_MPI_world().get_communicator());
        r.swap (totals);
    }
#else
    (void)r;
#endif
}


// Blocking probe for a pending message from src_processor_id with the
// given tag; returns the message status without receiving it.  Without
// MPI this returns a -1 sentinel status.
inline status
probe (
    const int src_processor_id,
    const int tag)
{
#ifdef DOGOS_HAVE_MPI
    status status_value;

    MPI_Probe (src_processor_id,
               tag,
               DOGOS_MPI::get_MPI_world().get_communicator(),
               &status_value);

    return status_value;
#else
    (void)src_processor_id;
    (void)tag;
    return -1;
#endif
}


// Blocking send of a vector to dest_processor_id using an explicitly
// supplied MPI datatype.  An empty vector sends a zero-length message
// (NULL buffer is legal for count 0).
template <typename T>
inline void
send (
    const int dest_processor_id,
    std::vector<T> &buf,
    const DataType &type,
    const int tag)
{
#ifdef DOGOS_HAVE_MPI
    // Only catch the return value when asserts are active.
    const int ierr =
        MPI_Send (buf.empty() ? NULL : &buf[0],
                  static_cast<int>(buf.size()),
                  type,
                  dest_processor_id,
                  tag,
                  DOGOS_MPI::get_MPI_world().get_communicator());

    TBOX_ASSERT (ierr == MPI_SUCCESS);
#else
    (void)dest_processor_id;
    (void)buf;
    (void)type;
    (void)tag;
#endif
}


/*
 *************************************************************************
 *                                                                       *
 * This is both a declaration and definition for a new overloaded        *
 * function template, so we have to re-specify the default argument      *
 *                                                                       *
 *************************************************************************
 */

// Blocking send of a vector of std::complex using an explicitly
// supplied MPI datatype.  Each complex is transmitted as two values of
// the underlying type, hence the count of 2 * size().
template <typename T>
inline void
send (
    const int dest_processor_id,
    std::vector<std::complex<T> > &buf,
    const DataType &type,
    const int tag=0)
{
#ifdef DOGOS_HAVE_MPI
    // Only catch the return value when asserts are active.
    const int ierr =
        MPI_Send (buf.empty() ? NULL : &buf[0],
                  static_cast<int>(buf.size()) * 2,
                  type,
                  dest_processor_id,
                  tag,
                  DOGOS_MPI::get_MPI_world().get_communicator());

    TBOX_ASSERT (ierr == MPI_SUCCESS);
#else
    (void)dest_processor_id;
    (void)buf;
    (void)type;
    (void)tag;
#endif
}


// Nonblocking send of a vector with an explicitly supplied MPI
// datatype.  Returns immediately; 'req' must later be completed with
// Parallel::wait, and 'buf' must stay alive and unmodified until then.
template <typename T>
inline void
send (
    const int dest_processor_id,
    std::vector<T> &buf,
    const DataType &type,
    request &req,
    const int tag)
{
#ifdef DOGOS_HAVE_MPI
    // Only catch the return value when asserts are active.
    const int ierr =
        MPI_Isend (buf.empty() ? NULL : &buf[0],
                   static_cast<int>(buf.size()),
                   type,
                   dest_processor_id,
                   tag,
                   DOGOS_MPI::get_MPI_world().get_communicator(),
                   &req);
    TBOX_ASSERT (ierr == MPI_SUCCESS);
#else
    (void)dest_processor_id;
    (void)buf;
    (void)type;
    (void)req;
    (void)tag;
#endif
}


/*
 *************************************************************************
 *                                                                       *
 * This is both a declaration and definition for a new overloaded        *
 * function template, so we have to re-specify the default argument      *
 *                                                                       *
 *************************************************************************
 */

// Nonblocking send of a vector of std::complex with an explicitly
// supplied MPI datatype.  Each complex is transmitted as two values of
// the underlying type (count 2 * size()).  'req' must later be
// completed with Parallel::wait; 'buf' must stay alive and unmodified
// until then.
template <typename T>
inline void
send (
    const int dest_processor_id,
    std::vector<std::complex<T> > &buf,
    const DataType &type,
    request &req,
    const int tag=0)
{
#ifdef DOGOS_HAVE_MPI
    // Only catch the return value when asserts are active.
    // Bug fix: this previously called "MPI_ISend", which is not an MPI
    // function (the standard name is MPI_Isend) and failed to compile
    // whenever DOGOS_HAVE_MPI was defined and this overload was used.
    const int ierr =
        MPI_Isend (buf.empty() ? NULL : &buf[0],
                   static_cast<int>(buf.size()) * 2,
                   type,
                   dest_processor_id,
                   tag,
                   DOGOS_MPI::get_MPI_world().get_communicator(),
                   &req);
    TBOX_ASSERT (ierr == MPI_SUCCESS);
#else
    (void)dest_processor_id;
    (void)buf;
    (void)type;
    (void)req;
    (void)tag;
#endif
}


// Convenience overload: blocking send of a vector, deducing the MPI
// datatype from T via datatype<T>().
template <typename T>
inline void
send (
    const int dest_processor_id,
    std::vector<T> &buf,
    const int tag)
{
#ifdef DOGOS_HAVE_MPI
    send (dest_processor_id,
          buf,
          datatype<T>(),
          tag);
#else
    (void)dest_processor_id;
    (void)buf;
    (void)tag;
#endif
}


// Convenience overload: blocking send of a vector of std::complex,
// deducing the MPI datatype of the underlying type T.
template <typename T>
inline void
send (
    const int dest_processor_id,
    std::vector<std::complex<T> > &buf,
    const int tag)
{
#ifdef DOGOS_HAVE_MPI
    send (dest_processor_id,
          buf,
          datatype<T>(),
          tag);
#else
    (void)dest_processor_id;
    (void)buf;
    (void)tag;
#endif
}


// Convenience overload: nonblocking send of a vector, deducing the MPI
// datatype from T via datatype<T>().
template <typename T>
inline void
send (
    const int dest_processor_id,
    std::vector<T> &buf,
    request &req,
    const int tag)
{
#ifdef DOGOS_HAVE_MPI
    send (dest_processor_id,
          buf,
          datatype<T>(),
          req,
          tag);
#else
    (void)dest_processor_id;
    (void)buf;
    (void)req;
    (void)tag;
#endif
}


// Convenience overload: nonblocking send of a vector of std::complex,
// deducing the MPI datatype of the underlying type T.
template <typename T>
inline void
send (
    const int dest_processor_id,
    std::vector<std::complex<T> > &buf,
    request &req,
    const int tag)
{
#ifdef DOGOS_HAVE_MPI
    send (dest_processor_id,
          buf,
          datatype<T>(),
          req,
          tag);
#else
    (void)dest_processor_id;
    (void)buf;
    (void)req;
    (void)tag;
#endif
}


/*
 *************************************************************************
 *                                                                       *
 * Nonblocking-send vector to one processor with user-defined type.      *
 *                                                                       *
 *************************************************************************
 */

// Named alias for the nonblocking send overload taking an explicit MPI
// datatype; simply forwards to send().
template <typename T>
inline void
nonblocking_send (
    const int dest_processor_id,
    std::vector<T> &buf,
    const DataType &type,
    request &r,
    const int tag)
{
#ifdef DOGOS_HAVE_MPI
    send (dest_processor_id,
          buf,
          type,
          r,
          tag);
#else
    (void)dest_processor_id;
    (void)buf;
    (void)type;
    (void)r;
    (void)tag;
#endif
}


// Named alias for the nonblocking vector send, deducing the MPI
// datatype from T; forwards to send().
template <typename T>
inline void
nonblocking_send (
    const int dest_processor_id,
    std::vector<T> &buf,
    request &r,
    const int tag)
{
#ifdef DOGOS_HAVE_MPI
    send (dest_processor_id,
          buf,
          datatype<T>(),
          r,
          tag);
#else
    (void)dest_processor_id;
    (void)buf;
    (void)r;
    (void)tag;
#endif
}


// Named alias for the nonblocking send of a vector of std::complex,
// deducing the MPI datatype of the underlying type T; forwards to send().
template <typename T>
inline void
nonblocking_send (
    const int dest_processor_id,
    std::vector<std::complex<T> > &buf,
    request &r,
    const int tag)
{
#ifdef DOGOS_HAVE_MPI
    send (dest_processor_id,
          buf,
          datatype<T>(),
          r,
          tag);
#else
    (void)dest_processor_id;
    (void)buf;
    (void)r;
    (void)tag;
#endif
}


// Blocking receive of a vector with an explicitly supplied MPI
// datatype.  Probes first so 'buf' can be resized to exactly the
// incoming message length; returns the resulting Status.  Without MPI
// this returns a -1 sentinel status.
template <typename T>
inline Status
receive (
    const int src_processor_id,
    std::vector<T> &buf,
    const DataType &type,
    const int tag)
{
#ifdef DOGOS_HAVE_MPI
    // Get the status of the message, explicitly provide the
    // datatype so we can later query the size
    Status status_value (Parallel::probe (src_processor_id, tag), type);

    buf.resize (status_value.size());
    // Only catch the return value when asserts are active.
    const int ierr =
        MPI_Recv (buf.empty() ? NULL : &buf[0],
                  static_cast<int>(buf.size()),
                  type,
                  src_processor_id,
                  tag,
                  DOGOS_MPI::get_MPI_world().get_communicator(),
                  status_value);
    TBOX_ASSERT (ierr == MPI_SUCCESS);

    return status_value;
#else
    (void)src_processor_id;
    (void)buf;
    (void)type;
    (void)tag;
    return -1;
#endif
}


/*
 *************************************************************************
 *                                                                       *
 * This is both a declaration and definition for a new overloaded        *
 * function template, so we have to re-specify the default argument      *
 *                                                                       *
 *************************************************************************
 */

// Blocking receive of a vector of std::complex.  The status is built
// with datatype<T>() so its size() counts values of T; the message must
// therefore contain an even count (two Ts per complex), which the
// assert below checks before sizing 'buf' to size()/2.
template <typename T>
inline Status
receive (
    const int src_processor_id,
    std::vector<std::complex<T> > &buf,
    const DataType &type,
    const int tag=any_tag)
{
#ifdef DOGOS_HAVE_MPI
    // Get the status of the message, explicitly provide the
    // datatype so we can later query the size
    Status status_value (Parallel::probe (src_processor_id, tag), datatype<T>());

    TBOX_ASSERT (!(status_value.size()%2));
    buf.resize (status_value.size()/2);

    // Only catch the return value when asserts are active.
    const int ierr =
        MPI_Recv (buf.empty() ? NULL : &buf[0],
                  static_cast<int>(buf.size()) * 2,
                  type,
                  src_processor_id,
                  tag,
                  DOGOS_MPI::get_MPI_world().get_communicator(),
                  status_value);
    TBOX_ASSERT (ierr == MPI_SUCCESS);

    return status_value;
#else
    (void)src_processor_id;
    (void)buf;
    (void)type;
    (void)tag;
    return -1;
#endif
}


// Nonblocking receive of a vector with an explicitly supplied MPI
// datatype.  Note: no probe/resize here — 'buf' must already be sized
// by the caller, since buf.size() is passed as the receive count.
// 'req' must later be completed with Parallel::wait.
template <typename T>
inline void
receive (
    const int src_processor_id,
    std::vector<T> &buf,
    const DataType &type,
    request &req,
    const int tag)
{
#ifdef DOGOS_HAVE_MPI
    // Only catch the return value when asserts are active.
    const int ierr =
        MPI_Irecv (buf.empty() ? NULL : &buf[0],
                   static_cast<int>(buf.size()),
                   type,
                   src_processor_id,
                   tag,
                   DOGOS_MPI::get_MPI_world().get_communicator(),
                   &req);
    TBOX_ASSERT (ierr == MPI_SUCCESS);
#else
    (void)src_processor_id;
    (void)buf;
    (void)type;
    (void)req;
    (void)tag;
#endif
}


/*
 *************************************************************************
 *                                                                       *
 * This is both a declaration and definition for a new overloaded        *
 * function template, so we have to re-specify the default argument      *
 *                                                                       *
 *************************************************************************
 */

// Nonblocking receive of a vector of std::complex with an explicitly
// supplied MPI datatype; the count is 2 * size() (two Ts per complex).
// 'buf' must already be sized by the caller; 'req' must later be
// completed with Parallel::wait.
template <typename T>
inline void
receive (
    const int src_processor_id,
    std::vector<std::complex<T> > &buf,
    const DataType &type,
    request &req,
    const int tag=any_tag)
{
#ifdef DOGOS_HAVE_MPI
    const int ierr =
        MPI_Irecv (buf.empty() ? NULL : &buf[0],
                   static_cast<int>(buf.size()) * 2,
                   type,
                   src_processor_id,
                   tag,
                   DOGOS_MPI::get_MPI_world().get_communicator(),
                   &req);

    TBOX_ASSERT (ierr == MPI_SUCCESS);

    return;
#else
    (void)src_processor_id;
    (void)buf;
    (void)type;
    (void)req;
    (void)tag;
#endif
}


// Convenience overload: blocking receive of a vector, deducing the MPI
// datatype from T via datatype<T>().
template <typename T>
inline Status
receive (
    const int src_processor_id,
    std::vector<T> &buf,
    const int tag)
{
#ifdef DOGOS_HAVE_MPI
    return receive (src_processor_id,
                    buf,
                    datatype<T>(),
                    tag);
#else
    (void)src_processor_id;
    (void)buf;
    (void)tag;
    return -1;
#endif
}


// Convenience overload: blocking receive of a vector of std::complex,
// deducing the MPI datatype of the underlying type T.
template <typename T>
inline Status
receive (
    const int src_processor_id,
    std::vector<std::complex<T> > &buf,
    const int tag)
{
#ifdef DOGOS_HAVE_MPI
    return receive (src_processor_id,
                    buf,
                    datatype<T>(),
                    tag);
#else
    (void)src_processor_id;
    (void)buf;
    (void)tag;
    return -1;
#endif
}


// Convenience overload: nonblocking receive of a vector, deducing the
// MPI datatype from T.  'buf' must already be sized by the caller.
template <typename T>
inline void
receive (
    const int src_processor_id,
    std::vector<T> &buf,
    request &req,
    const int tag)
{
#ifdef DOGOS_HAVE_MPI
    receive (src_processor_id,
             buf,
             datatype<T>(),
             req,
             tag);
#else
    (void)src_processor_id;
    (void)buf;
    (void)req;
    (void)tag;
#endif
}


// Convenience overload: nonblocking receive of a vector of
// std::complex, deducing the MPI datatype of the underlying type T.
// 'buf' must already be sized by the caller.
template <typename T>
inline void
receive (
    const int src_processor_id,
    std::vector<std::complex<T> > &buf,
    request &req,
    const int tag)
{
#ifdef DOGOS_HAVE_MPI
    receive (src_processor_id,
             buf,
             datatype<T>(),
             req,
             tag);
#else
    (void)src_processor_id;
    (void)buf;
    (void)req;
    (void)tag;
#endif
}


// Named alias for the nonblocking receive taking an explicit MPI
// datatype; simply forwards to receive().
template <typename T>
inline void
nonblocking_receive (
    const int src_processor_id,
    std::vector<T> &buf,
    const DataType &type,
    request &r,
    const int tag)
{
#ifdef DOGOS_HAVE_MPI
    receive (src_processor_id,
             buf,
             type,
             r,
             tag);
#else
    (void)src_processor_id;
    (void)buf;
    (void)type;
    (void)r;
    (void)tag;
#endif
}


// Named alias for the nonblocking vector receive, deducing the MPI
// datatype from T; forwards to receive().
template <typename T>
inline void
nonblocking_receive (
    const int src_processor_id,
    std::vector<T> &buf,
    request &r,
    const int tag)
{
#ifdef DOGOS_HAVE_MPI
    receive (src_processor_id,
             buf,
             datatype<T>(),
             r,
             tag);
#else
    (void)src_processor_id;
    (void)buf;
    (void)r;
    (void)tag;
#endif
}


// Named alias for the nonblocking receive of a vector of std::complex,
// deducing the MPI datatype of the underlying type T; forwards to
// receive().
template <typename T>
inline void
nonblocking_receive (
    const int src_processor_id,
    std::vector<std::complex<T> > &buf,
    request &r,
    const int tag)
{
#ifdef DOGOS_HAVE_MPI
    receive (src_processor_id,
             buf,
             datatype<T>(),
             r,
             tag);
#else
    (void)src_processor_id;
    (void)buf;
    (void)r;
    (void)tag;
#endif
}


// Block until the nonblocking request r completes; returns its status.
// Without MPI this returns a -1 sentinel status.
inline status
wait (
    request &r)
{
#ifdef DOGOS_HAVE_MPI
    status status_value;

    MPI_Wait (&r, &status_value);

    return status_value;
#else
    (void)r;
    return -1;
#endif
}


// Block until every request in r completes; individual statuses are
// discarded (MPI_STATUSES_IGNORE).
inline void
wait (
    std::vector<request> &r)
{
#ifdef DOGOS_HAVE_MPI
    MPI_Waitall (static_cast<int>(r.size()), r.empty() ? NULL : &r[0], MPI_STATUSES_IGNORE);
#else
    (void)r;
#endif
}


#ifdef DOGOS_HAVE_MPI
// Block until every Request object in r completes, by waiting on each
// one in turn via its own wait() member.
inline void
wait (
    std::vector<Request> &r)
{
    for (unsigned int i=0; i<r.size(); i++)
    {
        r[i].wait();
    }
}
#endif


// Exchange vectors with two (possibly different) partners using an
// explicit MPI datatype: nonblocking-send 'send' to dest_processor_id
// while blocking-receive into 'recv' from source_processor_id, then
// wait for the send to complete.  A pure self-exchange short-circuits
// to a local copy.  Both sides use the fixed internal tag 321.
template <typename T>
inline void
send_receive (
    const int dest_processor_id,
    std::vector<T> &send,
    const int source_processor_id,
    std::vector<T> &recv,
    const DataType &type)
{
#ifdef DOGOS_HAVE_MPI
    if (dest_processor_id   == DOGOS_MPI::get_MPI_world().get_rank() &&
            source_processor_id == DOGOS_MPI::get_MPI_world().get_rank())
    {
        recv = send;
        return;
    }

    Parallel::request request_value;

    // Nonblocking send so we do not deadlock against the matching
    // blocking receive on the partner rank.
    Parallel::nonblocking_send (dest_processor_id,
                                send,
                                type,
                                request_value,
                                /* tag = */ 321);

    Parallel::receive (source_processor_id,
                       recv,
                       type,
                       /* tag = */ 321);

    Parallel::wait (request_value);
#else
    (void)dest_processor_id;
    (void)send;
    (void)source_processor_id;
    (void)recv;
    (void)type;
#endif
}


// Exchange a single value with two (possibly different) partners via
// MPI_Sendrecv, which is deadlock-free by construction.  Uses tag 0 and
// discards the receive status.  Self-exchange short-circuits to a copy.
template <typename T>
inline void
send_receive (
    const int dest_processor_id,
    T &send,
    const int source_processor_id,
    T &recv)
{
#ifdef DOGOS_HAVE_MPI
    if (dest_processor_id   == DOGOS_MPI::get_MPI_world().get_rank() &&
            source_processor_id == DOGOS_MPI::get_MPI_world().get_rank())
    {
        recv = send;
        return;
    }

    MPI_Sendrecv(&send, 1, datatype<T>(),
                 dest_processor_id, 0,
                 &recv, 1, datatype<T>(),
                 source_processor_id, 0,
                 DOGOS_MPI::get_MPI_world().get_communicator(),
                 MPI_STATUS_IGNORE);
#else
    (void)dest_processor_id;
    (void)send;
    (void)source_processor_id;
    (void)recv;
#endif
}


// Exchange a single std::complex value via MPI_Sendrecv; the complex is
// transmitted as two values of the underlying type (count 2).
// Self-exchange short-circuits to a copy.
template <typename T>
inline void
send_receive (
    const int dest_processor_id,
    std::complex<T> &send,
    const int source_processor_id,
    std::complex<T> &recv)
{
#ifdef DOGOS_HAVE_MPI
    if (dest_processor_id   == DOGOS_MPI::get_MPI_world().get_rank() &&
            source_processor_id == DOGOS_MPI::get_MPI_world().get_rank())
    {
        recv = send;
        return;
    }

    MPI_Sendrecv(&send, 2, datatype<T>(),
                 dest_processor_id, 0,
                 &recv, 2, datatype<T>(),
                 source_processor_id, 0,
                 DOGOS_MPI::get_MPI_world().get_communicator(),
                 MPI_STATUS_IGNORE);
#else
    (void)dest_processor_id;
    (void)send;
    (void)source_processor_id;
    (void)recv;
#endif
}


// Convenience overload: exchange vectors, deducing the MPI datatype
// from T via datatype<T>().
template <typename T>
inline void
send_receive (
    const int dest_processor_id,
    std::vector<T> &send,
    const int source_processor_id,
    std::vector<T> &recv)
{
#ifdef DOGOS_HAVE_MPI
    // Call the user-defined type version with automatic
    // type conversion based on template argument:
    send_receive (dest_processor_id,
                  send,
                  source_processor_id,
                  recv,
                  datatype<T>());
#else
    (void)dest_processor_id;
    (void)send;
    (void)source_processor_id;
    (void)recv;
#endif
}


// Exchange a ragged array (vector of vectors) with two (possibly
// different) partners.  Because the receiver cannot know the inner
// sizes in advance, both sides serialize with MPI_Pack into a byte
// stream with the layout:
//   [outer count][inner count 0][data 0][inner count 1][data 1]...
// and exchange the packed buffers with tag 123.  Self-exchange
// short-circuits to a local copy.
template <typename T>
inline void
send_receive (
    const int dest_processor_id,
    std::vector<std::vector<T> > &send,
    const int source_processor_id,
    std::vector<std::vector<T> > &recv)
{
#ifdef DOGOS_HAVE_MPI
    if (dest_processor_id   == DOGOS_MPI::get_MPI_world().get_rank() &&
            source_processor_id == DOGOS_MPI::get_MPI_world().get_rank())
    {
        recv = send;
        return;
    }

    // temporary buffers - these will be sized in bytes
    // and manipulated with MPI_Pack and friends
    std::vector<char> sendbuf, recvbuf;

    // figure out how many bytes we need to pack all the data
    int packedsize=0, sendsize=0;

    // The outer buffer size
    MPI_Pack_size (1,
                   datatype<unsigned int>(),
                   DOGOS_MPI::get_MPI_world().get_communicator(),
                   &packedsize);
    sendsize += packedsize;

    for (unsigned int i=0; i<send.size(); i++)
    {
        // The size of the ith inner buffer
        MPI_Pack_size (1,
                       datatype<unsigned int>(),
                       DOGOS_MPI::get_MPI_world().get_communicator(),
                       &packedsize);
        sendsize += packedsize;

        // The data for each inner buffer
        MPI_Pack_size (static_cast<int>(send[i].size()),
                       datatype<T>(),
                       DOGOS_MPI::get_MPI_world().get_communicator(),
                       &packedsize);
        sendsize += packedsize;
    }

    TBOX_ASSERT (sendsize /* should at least be 1! */);
    sendbuf.resize (sendsize);

    // Pack the send buffer
    int pos=0;

    // ... the size of the outer buffer
    // NOTE: from here on 'sendsize' is reused as a scratch variable for
    // the individual counts being packed/unpacked.
    sendsize = static_cast<int>(send.size());
    MPI_Pack (
        &sendsize,
        1,
        datatype<unsigned int>(),
        &sendbuf[0],
        static_cast<int>(sendbuf.size()),
        &pos,
        DOGOS_MPI::get_MPI_world().get_communicator());

    for (unsigned int i=0; i<send.size(); i++)
    {
        // ... the size of the ith inner buffer
        sendsize = static_cast<int>(send[i].size());
        MPI_Pack (
            &sendsize,
            1,
            datatype<unsigned int>(),
            &sendbuf[0],
            static_cast<int>(sendbuf.size()),
            &pos,
            DOGOS_MPI::get_MPI_world().get_communicator());

        // ... the contents of the ith inner buffer
        if (!send[i].empty())
        {
            MPI_Pack (
                &send[i][0],
                static_cast<int>(send[i].size()),
                datatype<T>(),
                &sendbuf[0],
                static_cast<int>(sendbuf.size()),
                &pos,
                DOGOS_MPI::get_MPI_world().get_communicator());
        }
    }

    // NOTE(review): MPI_Pack_size is an upper bound, so this assumes
    // the packed size exactly matches the estimate — confirm on all
    // target MPI implementations.
    TBOX_ASSERT (static_cast<unsigned int>(pos) == sendbuf.size());

    Parallel::request request_value;

    // Nonblocking send + blocking receive avoids deadlock against the
    // matching exchange on the partner rank.
    Parallel::nonblocking_send (dest_processor_id,
                                sendbuf,
                                MPI_PACKED,
                                request_value,
                                /* tag = */ 123);

    Parallel::receive (source_processor_id,
                       recvbuf,
                       MPI_PACKED,
                       /* tag = */ 123);

    // Unpack the received buffer
    TBOX_ASSERT (!recvbuf.empty());
    pos=0;
    MPI_Unpack (
        &recvbuf[0],
        static_cast<int>(recvbuf.size()),
        &pos,
        &sendsize,
        1,
        datatype<unsigned int>(),
        DOGOS_MPI::get_MPI_world().get_communicator());

    // ... size the outer buffer
    recv.resize (sendsize);

    for (unsigned int i=0; i<recv.size(); i++)
    {
        MPI_Unpack (
            &recvbuf[0],
            static_cast<int>(recvbuf.size()),
            &pos,
            &sendsize,
            1,
            datatype<unsigned int>(),
            DOGOS_MPI::get_MPI_world().get_communicator());

        // ... size the inner buffer
        recv[i].resize (sendsize);

        // ... unpack the inner buffer if it is not empty
        if (!recv[i].empty())
            MPI_Unpack (&recvbuf[0], static_cast<int>(recvbuf.size()), &pos,
                        &recv[i][0], static_cast<int>(recv[i].size()), datatype<T>(),
                        DOGOS_MPI::get_MPI_world().get_communicator());
    }

    Parallel::wait (request_value);
#else
    (void)dest_processor_id;
    (void)send;
    (void)source_processor_id;
    (void)recv;
#endif
}


/*
 * Gather one value of type T from every processor onto processor
 * root_id.  On root_id, recv is resized to the communicator size and
 * recv[i] holds processor i's value; other processors leave recv
 * untouched.  Collective: must be called by all processors.
 */
template <typename T>
inline void
gather(
    const int root_id,
    T send,
    std::vector<T> &recv)
{
#ifdef DOGOS_HAVE_MPI
    TBOX_ASSERT (root_id < DOGOS_MPI::get_MPI_world().size());

    // Only the root needs space to receive the gathered values.
    if (DOGOS_MPI::get_MPI_world().get_rank() == root_id)
        recv.resize (DOGOS_MPI::get_MPI_world().size());

    if (DOGOS_MPI::get_MPI_world().size() > 1)
    {
        MPI_Gather(&send,
                   1,
                   datatype<T>(),
                   recv.empty() ? NULL : &recv[0],
                   1,
                   datatype<T>(),
                   root_id,
                   DOGOS_MPI::get_MPI_world().get_communicator());

    }
    else
        recv[0] = send;
#else
    // Serial (non-MPI) build: mirror the single-processor MPI path so
    // both builds leave recv == { send } on the root.
    (void)root_id;
    recv.resize (1);
    recv[0] = send;
#endif
}


/*
 * Gather one std::complex<T> from every processor onto processor
 * root_id.  Each complex value travels as two consecutive values of
 * type T.  On root_id, recv is resized to the communicator size and
 * recv[i] holds processor i's value; other processors leave recv
 * untouched.  Collective: must be called by all processors.
 */
template <typename T>
inline void
gather(
    const int root_id,
    std::complex<T> send,
    std::vector<std::complex<T> > &recv)
{
#ifdef DOGOS_HAVE_MPI
    TBOX_ASSERT (root_id < DOGOS_MPI::get_MPI_world().size());

    // Only the root needs space to receive the gathered values.
    if (DOGOS_MPI::get_MPI_world().get_rank() == root_id)
        recv.resize (DOGOS_MPI::get_MPI_world().size());

    if (DOGOS_MPI::get_MPI_world().size() > 1)
    {
        MPI_Gather(&send,
                   2,
                   datatype<T>(),
                   recv.empty() ? NULL : &recv[0],
                   2,
                   datatype<T>(),
                   root_id,
                   DOGOS_MPI::get_MPI_world().get_communicator());
    }
    else
        recv[0] = send;
#else
    // Serial (non-MPI) build: mirror the single-processor MPI path so
    // both builds leave recv == { send } on the root.
    (void)root_id;
    recv.resize (1);
    recv[0] = send;
#endif
}


/*
 *************************************************************************
 *                                                                       *
 * This function provides a convenient method                            *
 * for combining vectors from each processor into one                    *
 * contiguous chunk on one processor.  This handles the                  *
 * case where the lengths of the vectors may vary.                       *
 * Specifically, this function transforms this:                          *
 *                                                                       *
 *    Processor 0: [ ... N_0 ]                                           *
 *    Processor 1: [ ....... N_1 ]                                       *
 *        ...                                                            *
 *    Processor M: [ .. N_M]                                             *
 *                                                                       *
 * into this:                                                            *
 *                                                                       *
 *    [ [ ... N_0 ] [ ....... N_1 ] ... [ .. N_M] ]                      *
 *                                                                       *
 * on processor root_id. This function is collective and therefore       *
 * must be called by all processors.                                     *
 *                                                                       *
 *************************************************************************
 */

/*
 * Concatenate the vector r of every processor, in rank order, into
 * one contiguous vector on processor root_id.  The per-processor
 * lengths may differ.  Collective: must be called by all processors.
 */
template <typename T>
inline void
gather(
    const int root_id,
    std::vector<T> &r)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() == 1)
    {
        // A lone processor already holds the full result.
        TBOX_ASSERT (DOGOS_MPI::get_MPI_world().get_rank()==root_id);
        return;
    }

    // Share every processor's contribution length.
    const int local_count = static_cast<int>(r.size());
    std::vector<int> sendlengths (DOGOS_MPI::get_MPI_world().size(), 0);
    Parallel::allgather(local_count, sendlengths);

    // Prefix sums give each rank's offset into the result; the final
    // sum is the total length.
    std::vector<int> displacements (DOGOS_MPI::get_MPI_world().size(), 0);
    unsigned int total_count = 0;
    for (int rank=0; rank != DOGOS_MPI::get_MPI_world().size(); ++rank)
    {
        displacements[rank] = total_count;
        total_count += sendlengths[rank];
    }

    // No processor has anything to contribute.
    if (total_count == 0)
        return;

    // Preserve the local contribution, then grow r (root only) to
    // hold the concatenated data.
    std::vector<T> local_data (r);
    if (root_id == DOGOS_MPI::get_MPI_world().get_rank())
        r.resize (total_count);

    // Pass NULL for empty buffers.  Only catch the return value when
    // asserts are active.
    const int ierr =
        MPI_Gatherv (local_data.empty() ? NULL : &local_data[0],
                     local_count, datatype<T>(),
                     r.empty() ? NULL : &r[0], &sendlengths[0],
                     &displacements[0], datatype<T>(),
                     root_id,
                     DOGOS_MPI::get_MPI_world().get_communicator());
    TBOX_ASSERT (ierr == MPI_SUCCESS);
#else
    (void)root_id;
    (void)r;
#endif
}


/*
 * Concatenate the complex-valued vector r of every processor, in rank
 * order, into one contiguous vector on processor root_id.  Each
 * std::complex<T> travels as two consecutive values of type T.
 * Collective: must be called by all processors.
 */
template <typename T>
inline void
gather(
    const int root_id,
    std::vector<std::complex<T> > &r)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() == 1)
    {
        TBOX_ASSERT (DOGOS_MPI::get_MPI_world().get_rank()==root_id);
        return;
    }

    std::vector<int>
    sendlengths  (DOGOS_MPI::get_MPI_world().size(), 0),
                 displacements (DOGOS_MPI::get_MPI_world().size(), 0);

    // Counts are in units of T (2 per complex value).
    const int mysize = static_cast<int>(r.size()) * 2;
    Parallel::allgather(mysize, sendlengths);

    // Find the total size of the final array and
    // set up the displacement offsets for each processor.
    // (signed loop index for consistency with the sibling overloads;
    // avoids a signed/unsigned comparison against the int size())
    unsigned int globalsize = 0;
    for (int i=0; i != DOGOS_MPI::get_MPI_world().size(); ++i)
    {
        displacements[i] = globalsize;
        globalsize += sendlengths[i];
    }

    // Check for quick return
    if (globalsize == 0)
    {
        return;
    }

    // Make temporary buffers for the input/output data
    std::vector<std::complex<T> > r_src (r);

    // now resize r to hold the global data on the receiving
    // processor (globalsize counts scalars of T, hence the /2)
    if (root_id == DOGOS_MPI::get_MPI_world().get_rank())
        r.resize (globalsize/2);

    // and get the data from the remote processors
    const int ierr =
        MPI_Gatherv (r_src.empty() ? NULL : &r_src[0], mysize, datatype<T>(),
                     r.empty() ? NULL : &r[0], &sendlengths[0],
                     &displacements[0], datatype<T>(),
                     root_id, DOGOS_MPI::get_MPI_world().get_communicator());
    TBOX_ASSERT (ierr == MPI_SUCCESS);
#else
    (void)root_id;
    (void)r;
#endif
}


/*
 * Gather the elements of every processor's set onto processor
 * root_id, merging them into root's set.  Collective: must be called
 * by all processors.
 */
template <typename T>
inline void
gather(
    const int root_id,
    std::set<T> &set)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() == 1)
    {
        TBOX_ASSERT (DOGOS_MPI::get_MPI_world().get_rank()==root_id);
        return;
    }

    // Flatten the local set into a vector for the vector gather.
    std::vector<T> key (set.begin(), set.end());

    gather(root_id, key);

    // On the root, fold every processor's elements back into the
    // set; duplicates are absorbed automatically.
    if(DOGOS_MPI::get_MPI_world().get_rank()==root_id)
        set.insert (key.begin(), key.end());
#else
    (void)root_id;
    (void)set;
#endif
}


/*
 * Gather the (key, value) pairs of every processor's map onto
 * processor root_id, merging them into root's map.  Keys already
 * present on the root keep their existing values (std::map::insert
 * semantics).  Collective: must be called by all processors.
 */
template <typename T1, typename T2>
inline void
gather(
    const unsigned int root_id,
    std::map<T1, T2> &map)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() == 1)
    {
        TBOX_ASSERT (DOGOS_MPI::get_MPI_world().get_rank() == root_id);
        return;
    }

    // Split the map into parallel key/value vectors so the vector
    // gather can transport them.
    std::vector<T1> key;
    std::vector<T2> value;
    key.reserve (map.size());
    value.reserve (map.size());

    typename std::map<T1, T2>::const_iterator it = map.begin();
    const typename std::map<T1, T2>::const_iterator it_end = map.end();
    for (; it != it_end; ++it)
    {
        key.push_back(it->first);
        value.push_back(it->second);
    }

    gather(root_id, key);
    gather(root_id, value);

    // Rebuild the merged map on the root.
    if(DOGOS_MPI::get_MPI_world().get_rank()==root_id)
    {
        TBOX_ASSERT (key.size() == value.size());
        for (unsigned int n=0; n<key.size(); n++)
            map.insert (std::make_pair(key[n], value[n]));
    }
#else
    (void)root_id;
    (void)map;
#endif
}


/*
 * Gather one value of type T from every processor onto every
 * processor.  On return recv[i] holds processor i's value on all
 * processors.  Collective: must be called by all processors.
 */
template <typename T>
inline void
allgather(
    T send,
    std::vector<T> &recv)
{
#ifdef DOGOS_HAVE_MPI
    recv.resize (DOGOS_MPI::get_MPI_world().size());

    if (DOGOS_MPI::get_MPI_world().size() > 1)
    {
        MPI_Allgather (&send,
                       1,
                       datatype<T>(),
                       &recv[0],
                       1,
                       datatype<T>(),
                       DOGOS_MPI::get_MPI_world().get_communicator());
    }
    else
        recv[0] = send;
#else
    // Serial (non-MPI) build: size the buffer before writing to it --
    // indexing an empty vector is undefined behavior.
    recv.resize (1);
    recv[0] = send;
#endif
}


/*
 * Gather one std::complex<T> from every processor onto every
 * processor.  Each complex value travels as two consecutive values of
 * type T.  On return recv[i] holds processor i's value on all
 * processors.  Collective: must be called by all processors.
 */
template <typename T>
inline void
allgather(
    std::complex<T> send,
    std::vector<std::complex<T> > &recv)
{
#ifdef DOGOS_HAVE_MPI
    recv.resize (DOGOS_MPI::get_MPI_world().size());

    if (DOGOS_MPI::get_MPI_world().size() > 1)
    {
        MPI_Allgather (&send,
                       2,
                       datatype<T>(),
                       &recv[0],
                       2,
                       datatype<T>(),
                       DOGOS_MPI::get_MPI_world().get_communicator());
    }
    else
        recv[0] = send;
#else
    // Serial (non-MPI) build: size the buffer before writing to it --
    // indexing an empty vector is undefined behavior.
    recv.resize (1);
    recv[0] = send;
#endif
}


/*
 *************************************************************************
 *                                                                       *
 * This function provides a convenient method                            *
 * for combining vectors from each processor into one                    *
 * contiguous chunk on one processor.  This handles the                  *
 * case where the lengths of the vectors may vary.                       *
 * Specifically, this function transforms this:                          *
 *                                                                       *
 *    Processor 0: [ ... N_0 ]                                           *
 *    Processor 1: [ ....... N_1 ]                                       *
 *        ...                                                            *
 *    Processor M: [ .. N_M]                                             *
 *                                                                       *
 * into this:                                                            *
 *                                                                       *
 *    [ [ ... N_0 ] [ ....... N_1 ] ... [ .. N_M] ]                      *
 *                                                                       *
 * on every processor. This function is collective and therefore        *
 * must be called by all processors.                                     *
 *                                                                       *
 *************************************************************************
 */

/*
 * Concatenate the vector r of every processor, in rank order, and
 * leave the concatenated result in r on all processors.  Pass
 * identical_buffer_sizes = true when every processor contributes the
 * same number of elements; that skips the extra length exchange.
 * Collective: must be called by all processors.
 */
template <typename T>
inline void
allgather(
    std::vector<T> &r,
    const bool identical_buffer_sizes)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() == 1)
        return;

    if (identical_buffer_sizes)
    {
        // Swap r with a buffer large enough for every processor's
        // chunk; after the swap "sendbuf" holds the local data.
        std::vector<T> sendbuf (r.size()*DOGOS_MPI::get_MPI_world().size());
        sendbuf.swap(r);
        MPI_Allgather (sendbuf.empty() ? NULL : &sendbuf[0],
                       static_cast<int>(sendbuf.size()),
                       datatype<T>(),
                       r.empty() ? NULL : &r[0],
                       static_cast<int>(sendbuf.size()),
                       datatype<T>(),
                       DOGOS_MPI::get_MPI_world().get_communicator());
        return;
    }

    // Variable-size case: first share every processor's length.
    const int local_count = static_cast<int>(r.size());
    std::vector<int> sendlengths (DOGOS_MPI::get_MPI_world().size(), 0);
    Parallel::allgather(local_count, sendlengths);

    // Prefix sums give each rank's offset into the result; the final
    // sum is the total length.
    std::vector<int> displacements (DOGOS_MPI::get_MPI_world().size(), 0);
    unsigned int total_count = 0;
    for (int rank=0; rank != DOGOS_MPI::get_MPI_world().size(); ++rank)
    {
        displacements[rank] = total_count;
        total_count += sendlengths[rank];
    }

    // No processor has anything to contribute.
    if (total_count == 0)
        return;

    // Swap r with a buffer sized for the full result; "sendbuf" then
    // holds the local contribution.
    std::vector<T> sendbuf (total_count);
    sendbuf.swap(r);

    // Pass NULL for empty buffers.  Only catch the return value when
    // asserts are active.
    const int ierr =
        MPI_Allgatherv (sendbuf.empty() ? NULL : &sendbuf[0],
                        local_count, datatype<T>(),
                        r.empty() ? NULL : &r[0], &sendlengths[0],
                        &displacements[0], datatype<T>(),
                        DOGOS_MPI::get_MPI_world().get_communicator());
    TBOX_ASSERT (ierr == MPI_SUCCESS);
#else
    (void)r;
    (void)identical_buffer_sizes;
#endif
}


/*
 * Concatenate the complex-valued vector r of every processor, in rank
 * order, and leave the result in r on all processors.  Each
 * std::complex<T> travels as two consecutive values of type T, so all
 * MPI counts below are in units of T.  Collective: must be called by
 * all processors.
 */
template <typename T>
inline void
allgather(
    std::vector<std::complex<T> > &r,
    const bool identical_buffer_sizes = false)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() == 1)
        return;

    if (identical_buffer_sizes)
    {
        std::vector<std::complex<T> > r_src (r.size()*DOGOS_MPI::get_MPI_world().size());
        r_src.swap(r);
        MPI_Allgather (r_src.empty() ? NULL : &r_src[0],
                       static_cast<int>(r_src.size()) * 2,
                       datatype<T>(),
                       r.empty() ? NULL : &r[0],
                       static_cast<int>(r_src.size()) * 2,
                       datatype<T>(),
                       DOGOS_MPI::get_MPI_world().get_communicator());
        return;
    }

    std::vector<int>
    sendlengths  (DOGOS_MPI::get_MPI_world().size(), 0),
                 displacements (DOGOS_MPI::get_MPI_world().size(), 0);

    // Counts are in units of T (2 per complex value).
    const int mysize = static_cast<int>(r.size()) * 2;
    Parallel::allgather(mysize, sendlengths);

    // Find the total size of the final array and
    // set up the displacement offsets for each processor.
    unsigned int globalsize = 0;
    for (int i=0; i != DOGOS_MPI::get_MPI_world().size(); ++i)
    {
        displacements[i] = globalsize;
        globalsize += sendlengths[i];
    }

    // Check for quick return
    if (globalsize == 0)
    {
        return;
    }

    // Swap the input into r_src and size r for the result.
    // globalsize counts scalars of type T, so the result holds
    // globalsize/2 complex values; allocating globalsize complex
    // values (as before) left the second half of r default-
    // initialized.
    std::vector<std::complex<T> > r_src (globalsize/2);
    r_src.swap(r);

    // and get the data from the remote processors.
    // Pass NULL if our vector is empty.
    const int ierr =
        MPI_Allgatherv (r_src.empty() ? NULL : &r_src[0], mysize, datatype<T>(),
                        r.empty()     ? NULL : &r[0],     &sendlengths[0],
                        &displacements[0], datatype<T>(),
                        DOGOS_MPI::get_MPI_world().get_communicator());
    TBOX_ASSERT (ierr == MPI_SUCCESS);
#else
    (void)r;
    (void)identical_buffer_sizes;
#endif
}


/*
 * Concatenate the TypeVector entries of every processor, in rank
 * order, and leave the result in r on all processors.  Each
 * TypeVector travels as three consecutive values of type T, so all
 * MPI counts below are in units of T.  Collective: must be called by
 * all processors.
 */
template <typename T>
inline void
allgather(
    std::vector<TypeVector<T> > &r)
{
#ifdef DOGOS_HAVE_MPI
    const DOGOS_MPI& mpi = DOGOS_MPI::get_MPI_world();
    if (mpi.size() == 1)
        return;

    std::vector<int>
    sendlengths  (mpi.size(), 0),
                 displacements (mpi.size(), 0);

    // Counts are in units of T (3 per TypeVector).
    const int mysize = static_cast<int>(r.size()) * 3;
    Parallel::allgather(mysize, sendlengths);

    // Find the total size of the final array and
    // set up the displacement offsets for each processor.
    unsigned int globalsize = 0;
    for (int i=0; i != mpi.size(); ++i)
    {
        displacements[i] = globalsize;
        globalsize += sendlengths[i];
    }

    // Check for quick return
    if (globalsize == 0)
    {
        return;
    }

    // Flatten the local TypeVectors into a scalar send buffer.
    std::vector<T> r_src (mysize), r_dst (globalsize);
    for (std::size_t n = 0; n < r.size(); ++n)
    {
        r_src[3*n+0] = r[n][0];
        r_src[3*n+1] = r[n][1];
        r_src[3*n+2] = r[n][2];
    }

    // and get the data from the remote processors.
    // The receive-buffer test must look at r_dst, not r: a processor
    // whose local r is empty still has to receive everyone else's
    // data (globalsize > 0 here, so r_dst is never empty).
    const int ierr =
        MPI_Allgatherv (r_src.empty() ? NULL : &r_src[0], mysize, datatype<T>(),
                        r_dst.empty() ? NULL : &r_dst[0], &sendlengths[0],
                        &displacements[0], datatype<T>(),
                        mpi.get_communicator());
    TBOX_ASSERT (ierr == MPI_SUCCESS);

    // Rebuild the TypeVectors.  Use explicit offsets: several n++
    // side effects inside one call expression would leave the
    // component order unspecified.
    r.clear();
    r.reserve (r_dst.size()/3);
    for (unsigned int n = 0; n < r_dst.size(); n += 3)
    {
        r.push_back(TypeVector<T>(r_dst[n], r_dst[n+1], r_dst[n+2]));
    }
#else
    (void)r;
#endif
}


/*
 * Merge the elements of every processor's set into the set on all
 * processors.  Collective: must be called by all processors.
 */
template <typename T>
inline void
allgather(
    std::set<T> &set)
{
#ifdef DOGOS_HAVE_MPI
    const DOGOS_MPI& mpi = DOGOS_MPI::get_MPI_world();
    if (mpi.size() == 1)
        return;

    // Flatten the local set into a vector for the vector allgather.
    std::vector<T> key (set.begin(), set.end());

    allgather(key);

    // Fold everyone's elements back in; duplicates are absorbed
    // automatically.
    set.insert (key.begin(), key.end());
#else
    (void)set;
#endif
}


/*
 * Merge the (key, value) pairs of every processor's map into the map
 * on all processors.  Keys already present keep their existing values
 * (std::map::insert semantics).  Collective: must be called by all
 * processors.
 */
template <typename T1, typename T2>
inline void
allgather(
    std::map<T1, T2> &map)
{
#ifdef DOGOS_HAVE_MPI
    const DOGOS_MPI& mpi = DOGOS_MPI::get_MPI_world();
    if (mpi.size() == 1)
        return;

    // Split the map into parallel key/value vectors so the vector
    // allgather can transport them.
    std::vector<T1> key;
    std::vector<T2> value;
    key.reserve (map.size());
    value.reserve (map.size());

    typename std::map<T1, T2>::const_iterator it = map.begin();
    const typename std::map<T1, T2>::const_iterator it_end = map.end();
    for (; it != it_end; ++it)
    {
        key.push_back(it->first);
        value.push_back(it->second);
    }

    allgather(key);
    allgather(value);
    TBOX_ASSERT (key.size()==value.size());

    // Fold everyone's pairs back in.
    for (unsigned int n=0; n<key.size(); ++n)
        map.insert (std::make_pair(key[n], value[n]));
#else
    (void)map;
#endif
}


/*
 *************************************************************************
 *                                                                       *
 * This function provides a convenient method                            *
 * for combining vectors from each processor into one                    *
 * contiguous chunk on one processor.  This handles the                  *
 * case where the lengths of the vectors may vary.                       *
 * Specifically, this function transforms this:                          *
 *                                                                       *
 *    Processor 0: [ ... N_0 ]                                           *
 *    Processor 1: [ ....... N_1 ]                                       *
 *        ...                                                            *
 *    Processor M: [ .. N_M]                                             *
 *                                                                       *
 * into this:                                                            *
 *                                                                       *
 *    [ [ ... N_0 ] [ ....... N_1 ] ... [ .. N_M] ]                      *
 *                                                                       *
 * on every processor. This function is collective and therefore        *
 * must be called by all processors.                                     *
 *                                                                       *
 *************************************************************************
 */

// NOTE: this overload is unimplemented -- it unconditionally aborts
// at run time via TBOX_ERROR.  The "#if 0" section below is an
// unfinished draft: it would not compile as written (it uses an
// undeclared type T where the template parameters are T1/T2, and
// treats the vector of maps as if it were flat, contiguous data).
template <typename T1, typename T2>
inline void
allgather(
    std::vector<std::map<T1, T2> > &r)
{
    (void)r;
    TBOX_ERROR ("not finished");
#if 0
    if (DOGOS_MPI::get_MPI_world().size() == 1)
        return;

    std::vector<int>
    sendlengths  (DOGOS_MPI::get_MPI_world().size(), 0),
                 displacements (DOGOS_MPI::get_MPI_world().size(), 0);

    std::vector<T1> key;
    std::vector<T2> value;

    const int mysize = r.size();
    Parallel::allgather(mysize, sendlengths);

    // Find the total size of the final array and
    // set up the displacement offsets for each processor.
    unsigned int globalsize = 0;
    for (int i=0; i != DOGOS_MPI::get_MPI_world().size(); ++i)
    {
        displacements[i] = globalsize;
        globalsize += sendlengths[i];
    }

    // Check for quick return
    if (globalsize == 0)
    {
        return;
    }

    // copy the input buffer
    std::vector<T> r_src (globalsize);
    r_src.swap(r);

    // and get the data from the remote processors.
    // Pass NULL if our vector is empty.

    // Only catch the return value when asserts are active.
    const int ierr =
        MPI_Allgatherv (r_src.empty() ? NULL : &r_src[0], mysize, datatype<T>(),
                        r.empty()     ? NULL : &r[0],     &sendlengths[0],
                        &displacements[0], datatype<T>(), DOGOS_MPI::get_MPI_world().get_communicator());

    TBOX_ASSERT (ierr == MPI_SUCCESS);
#endif
}


/*
 *************************************************************************
 *                                                                       *
 * Replaces the input buffer with the result of MPI_Alltoall.            *
 * The vector size must be of the form N*n_procs, where N is             *
 * the number of elements to be sent/received from each                  *
 * processor.                                                            *
 *                                                                       *
 *************************************************************************
 */

/*
 * Replace buf with the result of MPI_Alltoall.  buf must contain
 * N*n_procs elements: chunk i (N elements) is sent to processor i,
 * and on return chunk i holds the data received from processor i.
 * Collective: must be called by all processors.
 */
template <typename T>
inline void
alltoall(
    std::vector<T> &buf)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() == 1)
        return;

    // The buffer must split evenly across the communicator; check
    // before computing the per-processor chunk size.
    TBOX_ASSERT (buf.size()%DOGOS_MPI::get_MPI_world().size() == 0);

    // the per-processor size.  this is the same for all
    // processors using MPI_Alltoall, could be variable
    // using MPI_Alltoallv.  MPI counts are int, so cast explicitly
    // (consistent with the casts used throughout this file).
    const int size_per_proc =
        static_cast<int>(buf.size())/DOGOS_MPI::get_MPI_world().size();

    std::vector<T> tmp(buf);

    // Only catch the return value when asserts are active.
    const int ierr =
        MPI_Alltoall (tmp.empty() ? NULL : &tmp[0],
                      size_per_proc,
                      datatype<T>(),
                      buf.empty() ? NULL : &buf[0],
                      size_per_proc,
                      datatype<T>(),
                      DOGOS_MPI::get_MPI_world().get_communicator());
    TBOX_ASSERT (ierr == MPI_SUCCESS);
#else
    (void)buf;
#endif
}


/*
 * Broadcast one value of type T from processor root_id to all
 * processors.  Collective: must be called by all processors.
 */
template <typename T>
inline void
broadcast (
    T &data,
    const int root_id)
{
#ifdef DOGOS_HAVE_MPI
    // Serial runs have nothing to communicate.
    if (DOGOS_MPI::get_MPI_world().size() == 1)
    {
        TBOX_ASSERT (DOGOS_MPI::get_MPI_world().get_rank() == root_id);
        return;
    }

    // Only catch the return value when asserts are active.
    const int ierr = MPI_Bcast (&data,
                                1,
                                datatype<T>(),
                                root_id,
                                DOGOS_MPI::get_MPI_world().get_communicator());
    TBOX_ASSERT (ierr == MPI_SUCCESS);
#else
    (void)data;
    (void)root_id;
#endif
}


/*
 * Broadcast count values of type T, starting at data, from processor
 * root_id to all processors.  Collective: must be called by all
 * processors.
 */
template <typename T>
inline void
broadcast (
    T* data,
    int count,
    int root_id)
{
#ifdef DOGOS_HAVE_MPI
    // Serial runs have nothing to communicate.
    if (DOGOS_MPI::get_MPI_world().size() == 1)
    {
        TBOX_ASSERT (DOGOS_MPI::get_MPI_world().get_rank() == root_id);
        return;
    }

    // Only catch the return value when asserts are active.
    const int ierr = MPI_Bcast (data,
                                count,
                                datatype<T>(),
                                root_id,
                                DOGOS_MPI::get_MPI_world().get_communicator());
    TBOX_ASSERT (ierr == MPI_SUCCESS);
#else
    (void)data;
    (void)count;
    (void)root_id;
#endif
}


/*
 * Broadcast one std::complex<T> from processor root_id to all
 * processors.  The value travels as two consecutive values of type T.
 * Collective: must be called by all processors.
 */
template <typename T>
inline void
broadcast (
    std::complex<T> &data,
    const int root_id)
{
#ifdef DOGOS_HAVE_MPI
    // Serial runs have nothing to communicate.
    if (DOGOS_MPI::get_MPI_world().size() == 1)
    {
        TBOX_ASSERT (DOGOS_MPI::get_MPI_world().get_rank() == root_id);
        return;
    }

    const int ierr = MPI_Bcast (&data,
                                2,
                                datatype<T>(),
                                root_id,
                                DOGOS_MPI::get_MPI_world().get_communicator());
    TBOX_ASSERT (ierr == MPI_SUCCESS);
#else
    (void)data;
    (void)root_id;
#endif
}


/*
 * Broadcast a string from processor root_id to all processors.  The
 * length travels first so receivers can size their buffers; the
 * characters then travel through the vector<char> broadcast.
 */
template <>
inline void
broadcast (
    std::string &data,
    const int root_id)
{
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() == 1)
    {
        TBOX_ASSERT (DOGOS_MPI::get_MPI_world().get_rank() == root_id);
        return;
    }

    // Everyone needs the length before the character buffer can be
    // sized.
    int data_size = static_cast<int>(data.size());
    Parallel::broadcast (data_size, root_id);

    std::vector<char> data_c (data_size);
    std::string orig(data);

    // Only the root's characters matter; every other processor's
    // buffer is overwritten by the broadcast below.
    if (DOGOS_MPI::get_MPI_world().get_rank() == root_id)
        for (int i=0; i<data_size; i++)
            data_c[i] = data[i];

    Parallel::broadcast (data_c,root_id);

    // Rebuild the string from the received characters.
    data.assign (data_c.begin(), data_c.end());

    // Sanity check: the root must end up with exactly what it sent.
    if (DOGOS_MPI::get_MPI_world().get_rank() == root_id)
        TBOX_ASSERT (data == orig);
#else
    (void)data;
    (void)root_id;
#endif
}


/*
 * Broadcast the contents of data from processor root_id to all
 * processors.  Callers are expected to size data identically on every
 * processor (MPI_Bcast requires matching counts).  Collective: must
 * be called by all processors.
 */
template <typename T>
inline void
broadcast (
    std::vector<T> &data,
    const int root_id)
{
#ifdef DOGOS_HAVE_MPI
    // Serial runs have nothing to communicate.
    if (DOGOS_HAVE_MPI_WORLD_IS_SERIAL
        false) {}
#endif
#ifdef DOGOS_HAVE_MPI
    if (DOGOS_MPI::get_MPI_world().size() == 1)
    {
        TBOX_ASSERT (DOGOS_MPI::get_MPI_world().get_rank() == root_id);
        return;
    }

    // Pass NULL for an empty vector.  Only catch the return value
    // when asserts are active.
    const int ierr =
        MPI_Bcast (data.empty() ? NULL : &data[0],
                   static_cast<int>(data.size()),
                   datatype<T>(),
                   root_id,
                   DOGOS_MPI::get_MPI_world().get_communicator());
    TBOX_ASSERT (ierr == MPI_SUCCESS);
#else
    (void)data;
    (void)root_id;
#endif
}


/*
 * Broadcast the contents of a complex-valued vector from processor
 * root_id to all processors.  Each std::complex<T> travels as two
 * consecutive values of type T.  Callers are expected to size data
 * identically on every processor (MPI_Bcast requires matching
 * counts).  Collective: must be called by all processors.
 */
template <typename T>
inline void
broadcast (
    std::vector<std::complex<T> > &data,
    const int root_id)
{
#ifdef DOGOS_HAVE_MPI
    // Serial runs have nothing to communicate.
    if (DOGOS_MPI::get_MPI_world().size() == 1)
    {
        TBOX_ASSERT (DOGOS_MPI::get_MPI_world().get_rank() == root_id);
        return;
    }

    // Pass NULL for an empty vector; the count is in units of T.
    const int ierr =
        MPI_Bcast (data.empty() ? NULL : &data[0],
                   2 * static_cast<int>(data.size()),
                   datatype<T>(),
                   root_id,
                   DOGOS_MPI::get_MPI_world().get_communicator());
    TBOX_ASSERT (ierr == MPI_SUCCESS);
#else
    (void)data;
    (void)root_id;
#endif
}

} //namespace Parallel

} //namespace DOGOS
