/* benchmark guru interfaces */
#include <Utilities/RandomGenerator.h>
#include <Configuration.h>
#include <mpi/communicator.h>
#include <mpi/collectives.h>
#include <Utilities/Timer.h>
#include <OhmmsPETE/OhmmsMatrix.h>
#include <OhmmsPETE/TinyVector.h>
#include <Message/OpenMP.h>
#include <benchmark/transpose.h>
#include <fstream>
using namespace std;
using namespace APPNAMESPACE;

/** verify that trA holds the transpose of A and report the result
 * @param A input matrix (locN x sizeM view in this benchmark)
 * @param trA candidate transpose of A
 * @param os stream receiving the diagnostic report
 *
 * Prints A, then compares A(i,j) against trA(j,i) element-by-element
 * with a tolerance of 10x machine epsilon for T.  On mismatch the
 * (incorrect) trA is printed so the failure can be inspected.
 */
template<typename T>
inline void check_transpose(Matrix<T>& A,  Matrix<T>& trA, ostream& os)
{
  os << "Before transposes " << endl;
  os << A << endl;
  bool fine=true;
  //10x epsilon absorbs rounding noise; epsilon<int>() is 0, so integral T
  //gets an exact comparison, which is the desired behavior
  const double eps=numeric_limits<T>::epsilon()*10.;
  for(int i=0; i<A.rows(); ++i)
    for(int j=0; j<A.cols(); ++j)
      if(abs(A(i,j)-trA(j,i))>eps) fine=false;
  if(fine)
    os<< "Transpose is correct " << endl;
  else
  {
    //fixed typo in the original message ("tranpose")
    os << "Incorrect transpose " << endl;
    os<< trA << endl;
  }
}

/** helper to transpose a distributed matrix using MPI all-to-all exchange
 *
 * The global matrix is (sizeN x sizeM), partitioned row-wise over the
 * communicator: each rank owns a (locN x sizeM) slab and receives a
 * (locM x sizeN) slab of the transpose.  TRANSENG selects the serial
 * transpose engine (Transpose2D) used in the single-rank case.
 *
 * NOTE(review): the constructor assumes sizeN and sizeM are evenly
 * divisible by the communicator size — remainders are truncated; verify
 * callers guarantee this.
 */
template<typename T, unsigned TRANSENG>
struct TransposeHelper
{
  int sizeN;       ///< global number of rows
  int sizeM;       ///< global number of columns
  int locN;        ///< rows owned locally (sizeN/num_blocks)
  int locM;        ///< columns per block (sizeM/num_blocks)
  int block_size;  ///< elements exchanged per rank pair (locN*locM)
  int num_blocks;  ///< number of ranks in the communicator
  ///accumulated timings: tt[0]=pack, tt[1]=all-to-all, tt[2]=unpack
  TinyVector<double,3> tt;
  const mpi::communicator& mycomm;
  vector<int> send_count;
  vector<int> send_offset;
  vector<int> recv_count;
  vector<int> recv_offset;
  ///receive buffer for the all-to-all, one row per peer rank
  Matrix<T> mybuffer;

  /** set up block sizes and the (uniform) counts/offsets for alltoallv
   * @param n global rows
   * @param m global columns
   * @param c communicator over which the matrix is partitioned
   */
  inline TransposeHelper(int n, int m, const mpi::communicator& c)
    :sizeN(n),sizeM(m),tt(0.0),mycomm(c)
  {
    num_blocks=mycomm.size();
    locN=sizeN/num_blocks;
    locM=sizeM/num_blocks;
    block_size=locN*locM;
    //every peer sends/receives the same contiguous block
    send_count.resize(num_blocks,locN*locM);
    send_offset.resize(num_blocks,0);
    for(int i=0; i<num_blocks; ++i) send_offset[i]=i*locN*locM;
    recv_count=send_count;
    recv_offset=send_offset;

    //no buffer needed in the serial case: apply() falls back to Transpose2D
    if(num_blocks>1) mybuffer.resize(num_blocks,locN*locM);
  }

  /** pack input data for all-to-all communication
   * @param in starting address of the input(locN,sizeM)
   * @param out starting address of a buffer(size,locN*locM)
   *
   * out buffer is used to pack the input data for all-to-all 
   * \code
   * for(int ip=0; ip<mycomm.size(); ++ip)
   *   for(int i=0; i<locM; ++i)
   *     for(int j=0; j<locN; ++j,++ij)
   *       out(ip,ij)=in(j,ip*locM+i);
   * \endcode
   */
  inline void pack(const T* restrict in, T* restrict out)
  {
    T* restrict tptr=out;
    for(int ip=0; ip<num_blocks; ++ip)
    {
      //walk column ip*locM+i of the local slab; stride sizeM moves down a row
      for(int i=0; i<locM; ++i)
      {
        const T* aptr=in+ip*locM+i;
        for(int j=0; j<locN; ++j,aptr+=sizeM) *tptr++=*aptr;
      }
    }
  }

  /** unpack data after all-to-all 
   * @param out starting address (locM,sizeN)
   *
   * \code
   * for(int ip=0; ip<num_blocks; ++ip)
   * {
   *   int ij=0;
   *   for(int i=0; i<locM; ++i)
   *     for(int j=0; j<locN; ++j,++ij)
   *        out(i,ip*locN+j)=buffer(ip,ij);
   *  }
   * \endcode
   */
  inline void unpack(T* restrict out)
  {
    for(int ip=0; ip<num_blocks;++ip)
    {
      //block received from rank ip holds locM runs of locN contiguous values
      const T* aptr=mybuffer.data()+ip*block_size;
      //iny = i*num_blocks+ip maps run i of rank ip to row i, columns [ip*locN, ip*locN+locN)
      for(int i=0,iny=ip; i<locM; ++i,iny+=num_blocks)
      {
        T* tptr=out+iny*locN;
        for(int j=0; j<locN; ++j) *tptr++=*aptr++;
      }
    }
  }

  /** transpose 
   * @param in starting address of the input data(locN,sizeM)
   * @param out starting address of the output data(locM,sizeN)
   *
   * out is temporarily used for alltoall
   */
  inline void apply(const T* restrict in, T* restrict out)
  {
    Timer myclock;
    pack(in,out);
    tt[0]+=myclock.elapsed();
    myclock.restart();
    MPI_Datatype what=mpi::get_mpi_datatype(T());
#if defined(USE_ALLTOALLV)
    //counts/offsets are uniform here; alltoallv kept for benchmarking the API
    int ierr=MPI_Alltoallv(out,&send_count[0],&send_offset[0],what,mybuffer.data(),&recv_count[0],&recv_offset[0],what,mycomm);
#else
    int ierr=MPI_Alltoall(out,block_size,what,mybuffer.data(),block_size,what,mycomm);
#endif
    //NOTE(review): ierr is never checked; MPI errors go undetected here
    tt[1]+=myclock.elapsed();
    myclock.restart();
    unpack(out);
    tt[2]+=myclock.elapsed();
  }

  /** transpose a Matrix
   *
   * Serial case (one rank) bypasses MPI and uses the compile-time
   * selected Transpose2D engine directly.
   */
  inline void apply(const Matrix<T>& a, Matrix<T>& trA)
  {
    if(num_blocks>1)
      apply(a.data(),trA.data());
    else
      Transpose2D<T,TRANSENG>::apply(a,trA);
  }
};

int main(int argc, char** argv)
{
  mpi::environment env(argc,argv);
  mpi::communicator mycomm;
  OhmmsInfo ohmms("transpose",mycomm.rank(),0,1);

  ///number of iterations
  int niters=10;
  ///rows
  int sizeN=4;
  ///columns
  int sizeM=4;
  ///partition count (reset to the communicator size below)
  int ny=4;
  ///if true, debug the implementation
  bool debug=false;

  //parse arguments: start at 1 to skip the program name, and require a
  //value after each size option (the original read argv[argc]==NULL when
  //an option was the last argument, making atoi crash)
  int ic=1;
  while(ic<argc)
  {
    std::string a(argv[ic]);
    if(a == "opt_s" && ic+1<argc)
      sizeN=sizeM=atoi(argv[++ic]);
    else if(a == "opt_n" && ic+1<argc)
      sizeN=atoi(argv[++ic]);
    else if(a == "opt_m" && ic+1<argc)
      sizeM=atoi(argv[++ic]);
    else if(a == "opt_p" && ic+1<argc)
      ny=atoi(argv[++ic]);
    else if(a == "opt_i" && ic+1<argc)
      niters=atoi(argv[++ic]);
    else if(a == "opt_debug")
      debug=true;
    ++ic;
  }

  //partitioning always follows the communicator; opt_p is effectively ignored
  ny=mycomm.size();
  int locN=sizeN/ny;
  int locM=sizeM/ny;

  typedef double value_type;
  //typedef int value_type;
  Matrix<value_type>  A(locN,sizeM), trA(locM,sizeN);
  TransposeHelper<value_type,0> forward(sizeN,sizeM,mycomm);

  //use A(i,j)=i*sizeM+j  where i=global locator
  for(int i=0; i<locN; ++i)
    for(int j=0; j<sizeM; ++j)
      A(i,j)=(locN*mycomm.rank()+i)*sizeM+j;

  if(debug)
  {
    forward.apply(A,trA);
    //trA(i,j) should be the global element (j, rank*locM+i) of A
    bool success=true;
    for(int i=0; i<locM; ++i)
    {
      for(int j=0; j<sizeN; ++j)
        if(abs(j*sizeM+mycomm.rank()*locM+i-trA(i,j))>1e-6) success=false;
    }
    if(success)
      cerr << mycomm.rank() << "  OK" << endl;
    else
      cerr << mycomm.rank() << "  failed" << endl;
  }

  Timer myclock;
  //tt[0]=total, tt[1]=pack, tt[2]=all-to-all, tt[3]=unpack (accumulated)
  vector<double> tt(4,0.0);
  for(int i=0; i<niters; ++i)
  {
    //fresh random content each iteration so the transpose is not cached away
    for(int j=0; j<A.size(); ++j) A(j)=Random();
    myclock.restart();
    forward.apply(A,trA);
    tt[0]+=myclock.elapsed();
  }
  for(int i=0; i<3; ++i) tt[i+1]=forward.tt[i];
  mpi::reduce(mycomm,tt);

  //averages per rank per iteration; order: total, all-to-all, pack, unpack
  app_log() << "Transpose " << sizeN << " " << sizeM << " "
    << locN << " " << locM << " " 
    << tt[0]/(mycomm.size()*niters) << " "
    << tt[2]/(mycomm.size()*niters) << " "
    << tt[1]/(mycomm.size()*niters) << " "
    << tt[3]/(mycomm.size()*niters) 
    << endl;

  return 0;
}

