#include <src/pcps.h>
#include <src/input.h>
#include <src/hamiltonian.h>
#include <src/matrix_actor.h>
#include <src/blas_lapack.h>
#include <src/lanczos.h>
#include <src/cluster.h>
#include <src/fullci.h>

//-----------------------------------------------------------------------------------
// pcps::fci::HamiltonianActor::HamiltonianActor -- constructor
//-----------------------------------------------------------------------------------
template <class SCALAR>
pcps::fci::HamiltonianActor<SCALAR>::HamiltonianActor(const pcps::Input & userinp, const pcps::Hamiltonian<SCALAR> & ham)
  // sites and particles are split evenly between the alpha and beta spin channels;
  // by convention (see the % 2 tests below) even site indices are alpha, odd are beta
  : _n_a_sites(userinp.nsites()/2),
    _n_b_sites(userinp.nsites()/2),
    _n_a_particles(userinp.nparticles()/2),
    _n_b_particles(userinp.nparticles()/2),
    // number of ways to place each spin's particles among that spin's sites
    _n_a_config(pcps::n_choose_m(userinp.nsites()/2, userinp.nparticles()/2)),
    _n_b_config(pcps::n_choose_m(userinp.nsites()/2, userinp.nparticles()/2)),
    // residual threshold used by converged()
    _thresh(1.0e-9)
{

  // ensure we are working with fermions (the sign logic in operate_by_A assumes it)
  if ( userinp.particle_type() != pcps::keyword::Part_Fermion )
    throw pcps::Exception( ( boost::format("pcps::fci::HamiltonianActor class only implemented for particles of type \"%s\"")
                             % pcps::keyword::Part_Fermion ).str() );

  // initialize configuration and index lookup vectors; the lookups map a bit-packed
  // occupation compound index (hence size 2^nsites) to a dense configuration index,
  // with -1 marking occupations that have the wrong particle number
  _config_a.assign(_n_a_sites, 0);
  _config_b.assign(_n_b_sites, 0);
  _index_lookup_a.assign(pcps::pow<int>(2, _n_a_sites), -1);
  _index_lookup_b.assign(pcps::pow<int>(2, _n_b_sites), -1);

  // classify every Hamiltonian operator into the flat vectors used by operate_by_A
  for (typename std::vector<typename pcps::OpBase<SCALAR>::const_ptr>::const_iterator op = ham.ops().begin(); op != ham.ops().end(); op++) {

    // raising-lowering (hopping) operators: must act within a single spin channel
    if ( typeid(**op) == typeid(pcps::OpRL<SCALAR>) ) {

      if ( (**op).sites().at(0) % 2 != (**op).sites().at(1) % 2 )
        throw pcps::Exception( ( boost::format("full ci code currently supports only same-spin raising-lowering operators") ).str() );

      // _rl_ops_* stores the (raise, lower) spatial-site pair in operator order;
      // _rl_ord_* stores the same pair sorted, used in operate_by_A to bound the
      // fermionic sign string between the two sites
      if ( (**op).sites().at(0) % 2 == 0 ) {
        _rl_ops_a.push_back( (**op).sites().at(0) / 2 );
        _rl_ops_a.push_back( (**op).sites().at(1) / 2 );
        _rl_ord_a.push_back( std::min((**op).sites().at(0), (**op).sites().at(1)) / 2 );
        _rl_ord_a.push_back( std::max((**op).sites().at(0), (**op).sites().at(1)) / 2 );
        _rl_coeff_a.push_back( (**op).coeff() );
      } else {
        _rl_ops_b.push_back( (**op).sites().at(0) / 2 );
        _rl_ops_b.push_back( (**op).sites().at(1) / 2 );
        _rl_ord_b.push_back( std::min((**op).sites().at(0), (**op).sites().at(1)) / 2 );
        _rl_ord_b.push_back( std::max((**op).sites().at(0), (**op).sites().at(1)) / 2 );
        _rl_coeff_b.push_back( (**op).coeff() );
      }

    // double number operators: must couple one alpha site with one beta site
    } else if ( typeid(**op) == typeid(pcps::OpNN<SCALAR>) ) {

      if ( (**op).sites().at(0) % 2 == (**op).sites().at(1) % 2 )
        throw pcps::Exception( ( boost::format("full ci code currently supports only alpha-beta double number operators") ).str() );

      // store each pair with the alpha spatial site first, then the beta site
      _nn_coeff_ab.push_back( (**op).coeff() );
      if ( (**op).sites().at(0) % 2 == 0 ) {
        _nn_ops_ab.push_back( (**op).sites().at(0) / 2);
        _nn_ops_ab.push_back( (**op).sites().at(1) / 2);
      } else {
        _nn_ops_ab.push_back( (**op).sites().at(1) / 2);
        _nn_ops_ab.push_back( (**op).sites().at(0) / 2);
      }

    } else
      throw pcps::Exception( ( boost::format("operator type \"%s\" is not supported by full ci code") % pcps::type_lookup(typeid(**op)) ).str() );

  }

  //std::cout << boost::format("alpha rl ops") << std::endl;
  //for (int i = 0; i < _rl_coeff_a.size(); i++)
  //  std::cout << boost::format(" %4i %4i %4i %4i  %20.4f")
  //               % _rl_ops_a.at(2*i)
  //               % _rl_ops_a.at(2*i+1)
  //               % _rl_ord_a.at(2*i)
  //               % _rl_ord_a.at(2*i+1)
  //               % _rl_coeff_a.at(i)
  //            << std::endl;
  //std::cout << std::endl;

  //std::cout << boost::format("beta rl ops") << std::endl;
  //for (int i = 0; i < _rl_coeff_b.size(); i++)
  //  std::cout << boost::format(" %4i %4i %4i %4i  %20.4f")
  //               % _rl_ops_b.at(2*i)
  //               % _rl_ops_b.at(2*i+1)
  //               % _rl_ord_b.at(2*i)
  //               % _rl_ord_b.at(2*i+1)
  //               % _rl_coeff_b.at(i)
  //            << std::endl;
  //std::cout << std::endl;

  //std::cout << boost::format("alpha-beta nn ops") << std::endl;
  //for (int i = 0; i < _nn_coeff_ab.size(); i++)
  //  std::cout << boost::format(" %4i %4i  %20.4f")
  //               % _nn_ops_ab.at(2*i)
  //               % _nn_ops_ab.at(2*i+1)
  //               % _nn_coeff_ab.at(i)
  //            << std::endl;
  //std::cout << std::endl;

  // populate alpha index lookup vectors: enumerate all occupations (pcps::Cluster with
  // 2 states per site -- presumably empty/occupied; confirm against Cluster docs) and
  // assign consecutive dense indices to those with the correct particle count
  {
    int i = 0;
    for (pcps::Cluster occ(_n_a_sites, 2); !occ.finished(); occ++)
      if ( std::accumulate(occ.ket_occs().begin(), occ.ket_occs().end(), 0) == _n_a_particles )
        _index_lookup_a.at(occ.cmpd()) = i++;
    assert( i == _n_a_config );
  }

  // populate beta index lookup vectors (same scheme as the alpha channel)
  {
    int i = 0;
    for (pcps::Cluster occ(_n_b_sites, 2); !occ.finished(); occ++)
      if ( std::accumulate(occ.ket_occs().begin(), occ.ket_occs().end(), 0) == _n_b_particles )
        _index_lookup_b.at(occ.cmpd()) = i++;
    assert( i == _n_b_config );
  }

}

//-----------------------------------------------------------------------------------
// pcps::fci::HamiltonianActor::operate_by_A -- function to perform y = A d
//-----------------------------------------------------------------------------------

// Applies the Hamiltonian to the trial vector d and accumulates the result into y.
// The operator lists built by the constructor are statically partitioned across MPI
// ranks; each rank computes its share of terms and the results are summed onto rank 0.
template <class SCALAR>
void pcps::fci::HamiltonianActor<SCALAR>::operate_by_A(const int n, SCALAR * const d, SCALAR * const y, const int myrank, const int tid) {

  // get MPI info
  const MPI::Comm & comm = MPI::COMM_WORLD;
  const int nproc = comm.Get_size();
  //const int myrank = comm.Get_rank();

  assert( n == _n_a_config * _n_b_config );

  // broadcast the input vector from rank 0 as raw bytes (works for both real and
  // complex SCALAR without needing a matching MPI datatype)
  comm.Bcast(d, n * sizeof(SCALAR), MPI::CHAR, 0);

  // zero the output vector
  pcps::xscal(n, pcps::zero<SCALAR>(), y, 1);

  //std::cout << std::endl;

  // loop over alpha configurations
  for (pcps::Cluster aocc(_n_a_sites, 2); !aocc.finished(); aocc++) {

    // skip configurations with the wrong number of particles
    if ( std::accumulate(aocc.ket_occs().begin(), aocc.ket_occs().end(), 0) != _n_a_particles ) continue;

    // loop over beta configurations
    for (pcps::Cluster bocc(_n_b_sites, 2); !bocc.finished(); bocc++) {

      // skip configurations with the wrong number of particles
      if ( std::accumulate(bocc.ket_occs().begin(), bocc.ket_occs().end(), 0) != _n_b_particles ) continue;

      // compute the configuration's compound index
      //const int cmpd_index = _n_b_config * _index_lookup_a.at(aocc.cmpd()) + _index_lookup_b.at(bocc.cmpd());
      const int cmpd_index = this->get_index(aocc.ket_occs(), bocc.ket_occs());

      //// print the configuration
      //for (int i = 0; i < _n_a_sites; i++) std::cout << aocc.ket_occs().at(i);
      //std::cout << " ";
      //for (int i = 0; i < _n_b_sites; i++) std::cout << bocc.ket_occs().at(i);
      //std::cout << boost::format("    %10i   %20.12f") % cmpd_index % d[cmpd_index] << std::endl;

      // get the trial vector's coefficient for this configuration
      const SCALAR x = d[cmpd_index];

      // [my_min, my_max) is this rank's slice of each operator list; the first
      // `extras` ranks each take one additional operator when the list does not
      // divide evenly among the processes
      int extras, my_min, my_max;
      const bool site_parallel = true;

      // alpha-beta nn operators: diagonal terms, contribute coeff * n_a * n_b
      extras = _nn_coeff_ab.size() % nproc;
      my_min = ( site_parallel ? myrank * ( _nn_coeff_ab.size() / nproc ) + std::min(myrank, extras) : 0 );
      my_max = ( site_parallel ? ( myrank + 1 ) * ( _nn_coeff_ab.size() / nproc ) + std::min(myrank + 1, extras) : _nn_coeff_ab.size() );
      for (int i = my_min; i < my_max; i++)
        y[cmpd_index] += _nn_coeff_ab[i] * double(aocc.ket_occs()[_nn_ops_ab[2*i]]) * double(bocc.ket_occs()[_nn_ops_ab[2*i+1]]) * x;

      // alpha rl operators: off-diagonal hopping terms within the alpha channel
      extras = _rl_coeff_a.size() % nproc;
      my_min = ( site_parallel ? myrank * ( _rl_coeff_a.size() / nproc ) + std::min(myrank, extras) : 0 );
      my_max = ( site_parallel ? ( myrank + 1 ) * ( _rl_coeff_a.size() / nproc ) + std::min(myrank + 1, extras) : _rl_coeff_a.size() );
      for (int i = my_min; i < my_max; i++) {
        // term vanishes unless the raise site is empty and the lower site is occupied
        if ( aocc.ket_occs()[_rl_ops_a[2*i]] == 1 || aocc.ket_occs()[_rl_ops_a[2*i+1]] == 0 ) continue;
        // fermionic sign: each occupied site strictly between the operator's two
        // sites contributes a factor of -1
        double sign = 1.0;
        for (int j = _rl_ord_a[2*i] + 1; j < _rl_ord_a[2*i+1]; j++)
          sign *= 1.0 - 2.0 * aocc.ket_occs()[j];
        // temporarily apply the hop to obtain the resulting configuration's index,
        // accumulate the contribution, then restore the original occupations
        aocc.ket_occs()[_rl_ops_a[2*i]] = 1;
        aocc.ket_occs()[_rl_ops_a[2*i+1]] = 0;
        //y[_n_b_config * _index_lookup_a.at(aocc.cmpd()) + _index_lookup_b.at(bocc.cmpd())] += sign * _rl_coeff_a[i] * x;
        y[this->get_index(aocc.ket_occs(), bocc.ket_occs())] += sign * _rl_coeff_a[i] * x;
        aocc.ket_occs()[_rl_ops_a[2*i]] = 0;
        aocc.ket_occs()[_rl_ops_a[2*i+1]] = 1;
      }

      // beta rl operators: same as the alpha case, acting on the beta channel
      extras = _rl_coeff_b.size() % nproc;
      my_min = ( site_parallel ? myrank * ( _rl_coeff_b.size() / nproc ) + std::min(myrank, extras) : 0 );
      my_max = ( site_parallel ? ( myrank + 1 ) * ( _rl_coeff_b.size() / nproc ) + std::min(myrank + 1, extras) : _rl_coeff_b.size() );
      for (int i = my_min; i < my_max; i++) {
        if ( bocc.ket_occs()[_rl_ops_b[2*i]] == 1 || bocc.ket_occs()[_rl_ops_b[2*i+1]] == 0 ) continue;
        double sign = 1.0;
        for (int j = _rl_ord_b[2*i] + 1; j < _rl_ord_b[2*i+1]; j++)
          sign *= 1.0 - 2.0 * bocc.ket_occs()[j];
        bocc.ket_occs()[_rl_ops_b[2*i]] = 1;
        bocc.ket_occs()[_rl_ops_b[2*i+1]] = 0;
        //y[_n_b_config * _index_lookup_a.at(aocc.cmpd()) + _index_lookup_b.at(bocc.cmpd())] += sign * _rl_coeff_b[i] * x;
        y[this->get_index(aocc.ket_occs(), bocc.ket_occs())] += sign * _rl_coeff_b[i] * x;
        bocc.ket_occs()[_rl_ops_b[2*i]] = 0;
        bocc.ket_occs()[_rl_ops_b[2*i+1]] = 1;
      }

    }

  }

  // reduce the partial results of all ranks onto rank 0; non-root ranks only need a
  // 1-element dummy buffer since the reduced vector is discarded there
  std::vector<SCALAR> temp((myrank == 0 ? n : 1), pcps::zero<SCALAR>());
  pcps::reduce(comm, y, &temp.at(0), n, MPI::SUM, 0);
  if (myrank == 0) pcps::xcopy(n, &temp.at(0), 1, y, 1);

  //std::cout << std::endl;

}

//-----------------------------------------------------------------------------------
// pcps::fci::HamiltonianActor::print_vector -- prints the supplied vector
//-----------------------------------------------------------------------------------

// Prints every configuration's occupation string (alpha sites, a space, beta sites)
// followed by the corresponding coefficient of the supplied vector x.
template <class SCALAR>
void pcps::fci::HamiltonianActor<SCALAR>::print_vector(const int n, const SCALAR * const x) const {

  assert( n == _n_a_config * _n_b_config );

  // walk all alpha-spin occupation patterns
  for (pcps::Cluster alpha(_n_a_sites, 2); !alpha.finished(); alpha++) {

    // keep only patterns with the correct alpha particle count
    const int n_alpha = std::accumulate(alpha.ket_occs().begin(), alpha.ket_occs().end(), 0);
    if ( n_alpha != _n_a_particles ) continue;

    // walk all beta-spin occupation patterns
    for (pcps::Cluster beta(_n_b_sites, 2); !beta.finished(); beta++) {

      // keep only patterns with the correct beta particle count
      const int n_beta = std::accumulate(beta.ket_occs().begin(), beta.ket_occs().end(), 0);
      if ( n_beta != _n_b_particles ) continue;

      // dense compound index of this (alpha, beta) configuration
      const int idx = this->get_index(alpha.ket_occs(), beta.ket_occs());

      // occupation string and coefficient
      for (int s = 0; s < _n_a_sites; s++) std::cout << alpha.ket_occs().at(s);
      std::cout << " ";
      for (int s = 0; s < _n_b_sites; s++) std::cout << beta.ket_occs().at(s);
      std::cout << boost::format(" %19.12e") % x[idx] << std::endl;

    }

  }

  std::cout << std::endl;

}

//-----------------------------------------------------------------------------------
// pcps::fci::HamiltonianActor::operate_by_M -- function to perform y = M d
//-----------------------------------------------------------------------------------

// This actor does not define a metric/preconditioner matrix M; applying it is an error.
template <class SCALAR>
void pcps::fci::HamiltonianActor<SCALAR>::operate_by_M(const int n, SCALAR * const d, SCALAR * const y, const int myrank, const int tid) {
  const boost::format msg("function operate_by_M is not implemented for the pcps::fci::HamiltonianActor class");
  throw pcps::Exception( msg.str() );
}

//-----------------------------------------------------------------------------------
// pcps::fci::HamiltonianActor::operate_by_M_inv -- function to perform y = M^(-1) d
//-----------------------------------------------------------------------------------

// This actor does not define a metric/preconditioner matrix M; applying its inverse is an error.
template <class SCALAR>
void pcps::fci::HamiltonianActor<SCALAR>::operate_by_M_inv(const int n, SCALAR * const d, SCALAR * const y, const int myrank, const int tid) {
  const boost::format msg("function operate_by_M_inv is not implemented for the pcps::fci::HamiltonianActor class");
  throw pcps::Exception( msg.str() );
}

//-----------------------------------------------------------------------------------
// pcps::fci::HamiltonianActor::converged -- function to check if the iterations
//                                           have converged
//-----------------------------------------------------------------------------------

// Decides on rank 0 whether the iterative solver has converged (residual below
// _thresh) and broadcasts that decision so every MPI rank agrees on it.
template <class SCALAR>
bool pcps::fci::HamiltonianActor<SCALAR>::converged(const double residual, const int myrank, const int tid) {

  // ensure we are not threading (the broadcast below is per-process, not per-thread)
  assert( tid == 0 );

  // Initialize to false: previously this was left uninitialized, which returned an
  // indeterminate value (undefined behavior) on any path that skipped the broadcast,
  // e.g. tid != 0 in a release build where the assert above compiles away.
  bool retval = false;

  // compute return value on root process
  if (myrank == 0 && tid == 0) {
    assert( residual >= 0.0 );
    retval = (residual < this->_thresh);
  }

  // broadcast return value to all processes
  if (tid == 0) MPI::COMM_WORLD.Bcast(&retval, 1, MPI::BOOL, 0);

  // return whether or not we have converged
  return retval;

}

//-----------------------------------------------------------------------------------
// pcps::fci::fullci -- Computes the minimum Hamiltonian eigenvalue and eigenvector
//                      in the full configuration interaction basis.
//-----------------------------------------------------------------------------------

// Computes the minimum Hamiltonian eigenvalue (and optionally the eigenvector) in the
// full configuration interaction basis via the Lanczos algorithm; rank 0 prints results.
template <class SCALAR> void pcps::fci::fullci(const pcps::Input & userinp, const pcps::Hamiltonian<SCALAR> & ham) {

  // get MPI info
  const MPI::Comm & comm = MPI::COMM_WORLD;
  const int nproc = comm.Get_size();
  const int myrank = comm.Get_rank();

  // object responsible for applying the Hamiltonian to trial vectors
  pcps::fci::HamiltonianActor<SCALAR> actor(userinp, ham);

  // maximum number of Lanczos iterations
  const int niter = userinp.max_macro_loop();

  // scalars and work arrays required by the Lanczos routine
  const int dim = actor.total_configs();
  double energy = 0.0;
  std::vector<SCALAR> evec(dim, pcps::zero<SCALAR>());     // converged eigenvector
  std::vector<SCALAR> krylov0(dim, pcps::zero<SCALAR>());  // Krylov vector workspace
  std::vector<SCALAR> krylov1(dim, pcps::zero<SCALAR>());  // Krylov vector workspace
  std::vector<SCALAR> tmat(niter*niter, pcps::zero<SCALAR>());
  std::vector<SCALAR> diag(niter, pcps::zero<SCALAR>());
  std::vector<SCALAR> offdiag(niter, pcps::zero<SCALAR>());
  std::vector<double> evals(niter, 0.0);
  std::vector<SCALAR> work(5*niter, pcps::zero<SCALAR>());
  std::vector<double> rwork(3*niter, 0.0);

  // run lanczos algorithm
  pcps::lanczos(dim, niter, energy, &evec.at(0), &krylov0.at(0), &krylov1.at(0), &tmat.at(0), &diag.at(0), &offdiag.at(0), &evals.at(0),
                &work.at(0), &rwork.at(0), &actor, userinp.fci_shift(), userinp.workdir());

  // root process prints the eigenvalue
  if (myrank == 0) {
    std::cout << boost::format("Full CI energy     %20.12f") % energy << std::endl;
    std::cout << std::endl;
  }

  // root process optionally prints the eigenvector
  if (myrank == 0 && userinp.verbose_print()) {
    std::cout << boost::format("Full CI vector:") << std::endl;
    std::cout << std::endl;
    actor.print_vector(dim, &evec.at(0));
  }

}

// explicitly instantiate the fullci driver for the two scalar types used by the code
// (real double and complex double)
template void pcps::fci::fullci(const pcps::Input &, const pcps::Hamiltonian< double               > &);
template void pcps::fci::fullci(const pcps::Input &, const pcps::Hamiltonian< std::complex<double> > &);
