#ifndef PCPS_PARALLEL_HEADER
#define PCPS_PARALLEL_HEADER

#include <algorithm>
#include <complex>
#include <typeinfo>
#include <vector>

#include <src/pcps.h>
#include <src/input.h>
#include <src/hamiltonian.h>
#include <src/wavefunction.h>
#include <src/expectation_value.h>

namespace pcps {

  //----------------------------------------------------------------------------------
  // pcps::Parallel_Worker_Base -- A base class for worker objects used to compute
  //                               arrays in parallel.
  //----------------------------------------------------------------------------------

  template <class SCALAR> class Parallel_Worker_Base {

    public:

      // Virtual destructor: workers are handled through base-class pointers
      // (see pcps::compute_in_parallel), so deleting a derived worker via a
      // Parallel_Worker_Base pointer must invoke the derived destructor.
      virtual ~Parallel_Worker_Base() {}

      // Performs this worker's share of the array computation.
      //
      //   userinp            - the user's input options
      //   ham                - hamiltonian supplying the operators to process
      //   wfn                - wavefunction the work is evaluated against
      //   perturbation_level - perturbation order for the computation
      //   output_array       - pre-allocated array (length array_size) to fill in
      //   array_size         - number of elements in output_array
      //   my_min, my_max     - range of hamiltonian operator indices assigned to
      //                        this process (presumably [my_min, my_max) -- confirm
      //                        against concrete worker implementations)
      //   slaves             - ranks of the slave processes (farm parallel mode)
      //   slaves_used        - in/out count of slaves currently in use
      //   jobs_in_progress   - in/out count of outstanding farmed-out jobs
      virtual void do_work(const pcps::Input & userinp,
                           const pcps::Hamiltonian<SCALAR> & ham,
                           const pcps::Wavefunction<SCALAR> & wfn,
                           const int perturbation_level,
                           const boost::shared_array<SCALAR> & output_array,
                           const int array_size,
                           const int my_min,
                           const int my_max,
                           const std::vector<int> & slaves,
                           int & slaves_used,
                           int & jobs_in_progress) = 0;

  };

  //----------------------------------------------------------------------------------
  // pcps::compute_in_parallel -- Computes the values of an array in parallel.
  //
  //                              What array is calculated is controlled by the worker
  //                              object that is passed to this function.
  //
  //                              On the root process, the array is returned.
  //                              On non-root processes, an empty pointer is returned.
  //
  //----------------------------------------------------------------------------------

  template <class SCALAR>
  boost::shared_array<SCALAR> compute_in_parallel(const pcps::Input & userinp,
                                                   const pcps::Hamiltonian<SCALAR> & ham,
                                                   const pcps::Wavefunction<SCALAR> & wfn,
                                                   const int perturbation_level,
                                                   pcps::Parallel_Worker_Base<SCALAR> * const worker,
                                                   const int array_size) {

    //   userinp            - user input options (selects the parallelism type)
    //   ham                - hamiltonian whose operators are distributed over processes
    //   perturbation_level - perturbation order, forwarded untouched to the worker
    //   worker             - object that performs the actual array computation
    //   array_size         - length of the array to be computed
    //   returns            - on rank 0, the completed array; elsewhere an empty pointer

    // get MPI info
    const MPI::Comm & comm = MPI::COMM_WORLD;
    const int nproc = comm.Get_size();
    const int myrank = comm.Get_rank();

    // determine the type of parallelism to use
    const bool site_parallel = ( userinp.parallel_type() == pcps::keyword::Parallel_Site );
    const bool farm_parallel = ( userinp.parallel_type() == pcps::keyword::Parallel_Farm );

    // the array to be filled in (remains empty on farm-parallel slave processes)
    boost::shared_array<SCALAR> output_array;

    // In farm parallel mode, only the master process deals with setting up expectation values.
    // In site parallel mode, each processor computes the expectation values for its sites.
    if (myrank == 0 || site_parallel) {

      // allocate and zero-initialize the output array
      output_array = pcps::allocate_shared_array<SCALAR>(array_size, pcps::zero<SCALAR>());

      // bookkeeping for farm parallel mode: the ranks of the slave processes
      // (1 .. nproc-1) and counters tracking their usage and outstanding jobs
      std::vector<int> slaves(nproc-1);
      for (std::vector<int>::size_type i = 0; i < slaves.size(); i++)  // size_type index avoids a signed/unsigned comparison
        slaves.at(i) = static_cast<int>(i) + 1;
      int slaves_used = 0;
      int jobs_in_progress = 0;

      // Determine which of the hamiltonian operators belong to this process.
      // In site parallel mode the operators are block-distributed over the ranks,
      // with the first `extras` ranks receiving one extra operator each; in farm
      // parallel mode the master nominally owns the whole operator range.
      const int extras = ham.ops().size() % nproc;
      const int my_min = ( site_parallel ? myrank * ( ham.ops().size() / nproc ) + std::min(myrank, extras) : 0 );
      const int my_max = ( site_parallel ? ( myrank + 1 ) * ( ham.ops().size() / nproc ) + std::min(myrank + 1, extras) : ham.ops().size() );

      // do whatever calculation the worker is designed to do
      worker->do_work(userinp, ham, wfn, perturbation_level, output_array, array_size, my_min, my_max, slaves, slaves_used, jobs_in_progress);

      // collect the results
      if (site_parallel) {
        // Sum the per-process partial arrays onto the root process.  Non-root
        // ranks still need a valid receive buffer for Reduce, hence the
        // length-1 dummy allocation on those ranks.
        boost::shared_array<SCALAR> reduce_target = pcps::allocate_shared_array<SCALAR>( (myrank == 0 ? array_size : 1), pcps::zero<SCALAR>());
        comm.Barrier();
        // dispatch on the scalar type to pick the matching MPI datatype
        if (typeid(SCALAR) == typeid(double))
          comm.Reduce((void *)output_array.get(), (void *)reduce_target.get(), array_size, MPI::DOUBLE, MPI::SUM, 0);
        else if (typeid(SCALAR) == typeid(std::complex<double>))
          comm.Reduce((void *)output_array.get(), (void *)reduce_target.get(), array_size, MPI::DOUBLE_COMPLEX, MPI::SUM, 0);
        else
          throw pcps::Exception("unknown SCALAR type in pcps::compute_in_parallel");
        if (myrank == 0)
          output_array = reduce_target;
      } else if (farm_parallel)
        // presumably drains the outstanding farmed-out jobs and accumulates their
        // results into output_array -- see pcps::collect_expectation_values
        pcps::collect_expectation_values(slaves, jobs_in_progress, output_array.get());
      else
        throw pcps::Exception("unknown parallelism type");

    // slave processes in farm parallel mode
    } else if (farm_parallel) {

      // wait for instructions from the master process
      pcps::expectation_value_slave(userinp, wfn);

    } else
      throw pcps::Exception("unknown parallelism type");

    // the root process returns the array
    if (myrank == 0) return output_array;

    // non-root processes return an unassigned pointer
    return boost::shared_array<SCALAR>();

  }

}

#endif
