﻿

#include "mpiWrapper.hpp"

#include <cstddef>
namespace UNAP {

template <typename T>
void allToallv(MPI_Comm mpiComm, std::vector<std::vector<T> >& sbuf,
               std::vector<T>& rbuf, std::vector<T*>& pbuf,
               const MPI_Datatype Ttype) {
  // Personalized all-to-all exchange implemented with nonblocking
  // point-to-point messages.
  //   sbuf[p] : data destined for rank p; released (swapped with an empty
  //             vector) once the exchange completes.
  //   rbuf    : on return, all received data stored back-to-back.
  //   pbuf[p] : on return, points into rbuf at the data received from rank p.
  //   Ttype   : MPI datatype describing one element of type T.
  // Note: a dead `MPI_Alltoallv` code path guarded by a hard-coded
  // `if (true)` was removed; only the point-to-point path was ever executed.
  int P;
  MPI_Comm_size(mpiComm, &P);
#ifdef DEBUG
  ASSERT((int)sbuf.size() == P);
#endif
  // Per-rank send/receive counts (MPI requires 32-bit ints).
  int* iwork = new int[2 * P];
  int* ssizes = iwork;
  int* rsizes = ssizes + P;
  for (int p = 0; p < P; p++) {
    if (sbuf[p].size() >
        static_cast<std::size_t>(std::numeric_limits<int>::max())) {
      std::cerr << "# ERROR: 32bit integer overflow in allToallv!!"
                << std::endl;
      MPI_Abort(mpiComm, 1);
    }
    ssizes[p] = static_cast<int>(sbuf[p].size());
  }
  // Exchange counts so every rank knows how much it will receive.
  MPI_Alltoall(ssizes, 1, mpiType<int>(), rsizes, 1, mpiType<int>(), mpiComm);
  const std::size_t totrsize =
      std::accumulate(rsizes, rsizes + P, std::size_t(0));

  rbuf.resize(totrsize);
  pbuf.resize(P);
  // One receive and one send per remote rank.
  MPI_Request* reqs = new MPI_Request[2 * (P - 1)];
  int r, count = 0;
  MPI_Comm_rank(mpiComm, &r);
  // Post receives in rank-rotated order ((r+p)%P) to spread the
  // communication load; the local contribution is copied directly instead
  // of being self-messaged.
  std::size_t displ = 0;
  for (int p = 0; p < P; p++) {
    int src = (r + p) % P;
    pbuf[src] = rbuf.data() + displ;
    if (src != r) {
      MPI_Irecv(pbuf[src], rsizes[src], Ttype, src, 0, mpiComm,
                reqs + count++);
    } else {
      // std::copy (not memcpy) stays correct for non-trivially-copyable T;
      // rsizes[r] == ssizes[r], so the whole local buffer is copied.
      std::copy(sbuf[src].begin(), sbuf[src].end(), pbuf[src]);
    }
    displ += rsizes[src];
  }
  for (int p = 0; p < P; p++) {
    int dst = (r + p) % P;
    if (dst != r)
      MPI_Isend(sbuf[dst].data(), ssizes[dst], Ttype, dst, 0, mpiComm,
                reqs + count++);
  }
  // count == 2*(P-1) here; waiting on `count` avoids recomputing it.
  MPI_Waitall(count, reqs, MPI_STATUSES_IGNORE);
  // Release the send buffers' memory.
  std::vector<std::vector<T> >().swap(sbuf);
  delete[] reqs;
  delete[] iwork;
}

void mpiSubComm(MPI_Comm& newComm, const MPI_Comm& comm, int P0, int P,
                int stride) {
  // Create a sub-communicator of `comm` containing the P ranks
  // P0, P0 + stride, ..., P0 + (P-1)*stride.  Ranks of `comm` that are not
  // in the selection receive MPI_COMM_NULL in newComm (MPI_Comm_create
  // semantics).  If comm itself is MPI_COMM_NULL, newComm is set to
  // MPI_COMM_NULL and nothing else happens.
  if (comm == MPI_COMM_NULL) {
    newComm = MPI_COMM_NULL;
    return;
  }
#ifdef DEBUG
  int commSize;
  MPI_Comm_size(comm, &commSize);
  // The last selected rank must exist.  The previous check
  // (P0 + P <= commSize) ignored the stride and could accept selections
  // reaching past the end of the communicator.
  ASSERT(P == 0 || P0 + (P - 1) * stride < commSize);
#endif
  std::vector<int> sub_ranks(P);
  for (int i = 0; i < P; i++) sub_ranks[i] = P0 + i * stride;
  MPI_Group group, sub_group;
  MPI_Comm_group(comm, &group);
  MPI_Group_incl(group, P, sub_ranks.data(), &sub_group);
  MPI_Comm_create(comm, sub_group, &newComm);
  // Groups are no longer needed once the communicator exists.
  MPI_Group_free(&group);
  MPI_Group_free(&sub_group);
}

template <typename T>
MPI_Datatype Triplet<T>::tripletMpiType_ = MPI_DATATYPE_NULL;

template <typename T>
MPI_Datatype Triplet<T>::mpiType() {
  // Lazily build (and cache) the committed MPI struct datatype describing
  // a Triplet<T>: fields r_, c_ (labels) and v_ (value).  The type is
  // resized to sizeof(Triplet<T>) so arrays of triplets have the correct
  // extent even in the presence of trailing padding.
  if (tripletMpiType_ != MPI_DATATYPE_NULL) return tripletMpiType_;

  const int nFields = 3;
  int blockLen[nFields] = {1, 1, 1};
  MPI_Datatype fieldType[nFields] = {UNAP::mpiType<label>(),
                                     UNAP::mpiType<label>(),
                                     UNAP::mpiType<T>()};
  MPI_Aint fieldOffset[nFields] = {offsetof(Triplet<T>, r_),
                                   offsetof(Triplet<T>, c_),
                                   offsetof(Triplet<T>, v_)};
  MPI_Datatype rawType;
  MPI_Type_create_struct(nFields, blockLen, fieldOffset, fieldType, &rawType);
  MPI_Type_create_resized(rawType, 0, sizeof(Triplet<T>), &tripletMpiType_);
  MPI_Type_free(&rawType);
  MPI_Type_commit(&tripletMpiType_);
  return tripletMpiType_;
}

template <typename T>
void Triplet<T>::freeMpiType() {
  // Release the cached datatype; no-op if mpiType() was never called.
  if (tripletMpiType_ == MPI_DATATYPE_NULL) return;
  MPI_Type_free(&tripletMpiType_);
  tripletMpiType_ = MPI_DATATYPE_NULL;
}

MPI_Datatype IdxIJ::idxijMpiType_ = MPI_DATATYPE_NULL;

MPI_Datatype IdxIJ::mpiType() {
  // Lazily create (and cache) a contiguous two-label MPI datatype for
  // IdxIJ.  NOTE(review): this assumes IdxIJ is laid out as exactly two
  // labels with no padding — confirm against the struct definition.
  if (idxijMpiType_ != MPI_DATATYPE_NULL) return idxijMpiType_;
  MPI_Type_contiguous(2, UNAP::mpiType<label>(), &idxijMpiType_);
  MPI_Type_commit(&idxijMpiType_);
  return idxijMpiType_;
}
void IdxIJ::freeMpiType() {
  // Release the cached datatype; no-op if mpiType() was never called.
  if (idxijMpiType_ == MPI_DATATYPE_NULL) return;
  MPI_Type_free(&idxijMpiType_);
  idxijMpiType_ = MPI_DATATYPE_NULL;
}

template <typename T>
MPI_Datatype IdxVal<T>::idxvalMpiType_ = MPI_DATATYPE_NULL;

template <typename T>
MPI_Datatype IdxVal<T>::mpiType() {
  // Lazily build (and cache) the committed MPI struct datatype describing
  // an IdxVal<T>: fields i_ (label) and v_ (value), resized to
  // sizeof(IdxVal<T>) so arrays have the correct extent.
  if (idxvalMpiType_ != MPI_DATATYPE_NULL) return idxvalMpiType_;

  const int nFields = 2;
  int blockLen[nFields] = {1, 1};
  MPI_Datatype fieldType[nFields] = {UNAP::mpiType<label>(),
                                     UNAP::mpiType<T>()};
  MPI_Aint fieldOffset[nFields] = {offsetof(IdxVal<T>, i_),
                                   offsetof(IdxVal<T>, v_)};
  MPI_Datatype rawType;
  MPI_Type_create_struct(nFields, blockLen, fieldOffset, fieldType, &rawType);
  MPI_Type_create_resized(rawType, 0, sizeof(IdxVal<T>), &idxvalMpiType_);
  MPI_Type_free(&rawType);
  MPI_Type_commit(&idxvalMpiType_);
  return idxvalMpiType_;
}
template <typename T>
void IdxVal<T>::freeMpiType() {
  // Release the cached datatype; no-op if mpiType() was never called.
  if (idxvalMpiType_ == MPI_DATATYPE_NULL) return;
  MPI_Type_free(&idxvalMpiType_);
  idxvalMpiType_ = MPI_DATATYPE_NULL;
}

// Explicit instantiations of the class templates defined in this
// translation unit, for the real and complex scalar types used elsewhere
// in the library.
template class Triplet<scalar>;
template class Triplet<std::complex<scalar> >;
template class IdxVal<scalar>;
template class IdxVal<std::complex<scalar> >;

// Explicit instantiations of allToallv for every element type that is
// exchanged through it.
template void allToallv(MPI_Comm mpiComm,
                        std::vector<std::vector<label> >& sbuf,
                        std::vector<label>& rbuf, std::vector<label*>& pbuf,
                        const MPI_Datatype Ttype);
template void allToallv(MPI_Comm mpiComm,
                        std::vector<std::vector<scalar> >& sbuf,
                        std::vector<scalar>& rbuf, std::vector<scalar*>& pbuf,
                        const MPI_Datatype Ttype);
template void allToallv(MPI_Comm mpiComm, std::vector<std::vector<char> >& sbuf,
                        std::vector<char>& rbuf, std::vector<char*>& pbuf,
                        const MPI_Datatype Ttype);
template void allToallv(MPI_Comm mpiComm,
                        std::vector<std::vector<std::complex<scalar> > >& sbuf,
                        std::vector<std::complex<scalar> >& rbuf,
                        std::vector<std::complex<scalar>*>& pbuf,
                        const MPI_Datatype Ttype);
template void allToallv(MPI_Comm mpiComm,
                        std::vector<std::vector<Triplet<scalar> > >& sbuf,
                        std::vector<Triplet<scalar> >& rbuf,
                        std::vector<Triplet<scalar>*>& pbuf,
                        const MPI_Datatype Ttype);
template void allToallv(
    MPI_Comm mpiComm,
    std::vector<std::vector<Triplet<std::complex<scalar> > > >& sbuf,
    std::vector<Triplet<std::complex<scalar> > >& rbuf,
    std::vector<Triplet<std::complex<scalar> >*>& pbuf,
    const MPI_Datatype Ttype);
template void allToallv(MPI_Comm mpiComm,
                        std::vector<std::vector<IdxIJ> >& sbuf,
                        std::vector<IdxIJ>& rbuf, std::vector<IdxIJ*>& pbuf,
                        const MPI_Datatype Ttype);
template void allToallv(MPI_Comm mpiComm,
                        std::vector<std::vector<IdxVal<scalar> > >& sbuf,
                        std::vector<IdxVal<scalar> >& rbuf,
                        std::vector<IdxVal<scalar>*>& pbuf,
                        const MPI_Datatype Ttype);
template void allToallv(
    MPI_Comm mpiComm,
    std::vector<std::vector<IdxVal<std::complex<scalar> > > >& sbuf,
    std::vector<IdxVal<std::complex<scalar> > >& rbuf,
    std::vector<IdxVal<std::complex<scalar> >*>& pbuf,
    const MPI_Datatype Ttype);

}  // namespace UNAP
