/*
 * functions.cc
 *
 *  Created on: Feb 21, 2013
 *      Author: drewlewis
 */

#include <iostream>
#include <iomanip>
#include <chemistry/molecule/molecule.h>
#include <chemistry/qc/basis/integral.h>
#include <chemistry/qc/basis/split.h>
#include "tiled_array.h"
#include <math/optimize/diis.h>
#include "Global.h"
#include <Eigen/Dense>
#include <ctime>
#include <TiledArray/eigen.h>

using namespace sc;
namespace TA = TiledArray;

/// Creates a density matrix D in the AO basis from a coefficient matrix C.
///
/// D(i,j) = \sum_m^{n_occ} C(i,m) * C(j,m), i.e. the outer product of the
/// occupied columns of C:  D = C_occ * C_occ^T.
///
/// @param n_occ number of occupied orbitals (leading columns of C to use)
/// @param C     coefficient matrix; rows are AOs, columns are MOs
/// @return the (n_basis x n_basis) one-particle density matrix
Matrix
make_D_AO( std::size_t n_occ, const Matrix &C){

    // Let Eigen perform the contraction as a single block product instead of
    // the previous hand-rolled triple loop; this vectorizes and is easier to
    // read.  noalias() is safe because D does not appear on the right side.
    std::size_t n_basis = C.cols();
    Matrix D = Matrix::Zero(n_basis,n_basis);
    D.noalias() = C.leftCols(n_occ) * C.leftCols(n_occ).transpose();

    return D;
}

/// Builds an initial density-matrix guess by diagonalizing the core
/// Hamiltonian in the Cholesky-orthogonalized basis: with S = L L^T, solve
/// the symmetric eigenproblem for L^{-1} H L^{-T} and back-transform the
/// eigenvectors.  Deliberately does not shift H_core to the Cholesky basis
/// first, since this turns out to improve convergence.
Matrix
intial_density_guess(const Matrix &S, const Matrix &H, const int occ){

    const Eigen::MatrixXd chol = S.llt().matrixL();
    const Eigen::MatrixXd chol_inv = chol.inverse();

    // Diagonalize the core Hamiltonian transformed into the orthogonal basis.
    Eigen::SelfAdjointEigenSolver<Matrix> solver(
        chol_inv * H * chol_inv.transpose());

    // Back-transform the eigenvectors to the AO basis.
    const Matrix coeffs = chol_inv.transpose() * solver.eigenvectors();

    // R = \sum_i^{occ} |C_i><C_i| : outer product over occupied orbitals.
    return make_D_AO(occ, coeffs);
}

void
Purify( ARRAY2 &R, const ARRAY2 &S){

    // Purification drives an approximate density matrix toward one that
    // satisfies the density-matrix conditions:
    //   R = R^T
    //   Trace(S.R) = number of electrons * 0.5
    //   eigenvalues of the matrix are 1 (idempotency in the S metric).
    // Symmetry is first enforced by averaging: R_{ij} <- (R_{ij} + R_{ji})/2.
    ARRAY2 current = (R("i,j") + R("j,i")) * 0.5;

    // Iterate R <- 3 R^2 - 2 R^3 (products taken through the S metric); this
    // only converges when the input is already close to the correct answer.
    double delta;
    do {
        R.get_world().gop.fence();
        ARRAY2 rs = current("i,k") * S("k,j");

        // One purification step.
        ARRAY2 rsr = rs("i,k") * current("k,j");
        ARRAY2 next = 3 * rsr("i,j") - 2 * rs("i,m") * rsr("m,j");
        ARRAY2 change = next("i,j") - current("i,j");

        delta = TA::expressions::norm2( change("i,j") );
        current = next;

    } while(delta > 1e-8);
    R = current;
}

ARRAY2
Scom(const ARRAY2 &R, const ARRAY2 &X, const ARRAY2 &S){

    // S-metric commutator [R,X]_S = R.S.X - X.S.R.  For symmetric R and S and
    // antisymmetric X the second term equals -(R.S.X)^T, so forming a single
    // triple product suffices.
    ARRAY2 rs = R("i,k") * S("k,n");
    ARRAY2 rsx = rs("i,n") * X("n,j");

    // R.S.X - X.S.R = RSX_{ij} + RSX_{ji}
    return rsx("i,j") + rsx("j,i");
}

/// Approximates the density rotation R(X) = e^{-XS} R e^{SX} through the BCH
/// expansion
///   R(X) = R + [R,X]_s + 1/(2!)[[R,X]_s,X]_s + 1/(3!)[[[R,X]_s,X]_s,X]_s ...
///
/// @param R     density matrix, rotated in place
/// @param X     antisymmetric rotation generator
/// @param S     overlap matrix (metric for the commutators)
/// @param order number of BCH terms to accumulate beyond R itself
void
RotateR(ARRAY2 &R, const ARRAY2 &X, const ARRAY2 &S,
        const size_t order = 2){

    ARRAY2 LHS = R("i,j");

    //Compute the first S commutator because we always want at least 1st order
    TA::Array<double, 2> steps = Scom(R, X, S);

    // Running coefficient 1/(i+1)!, built incrementally.  The previous
    // hard-coded table was truncated to ~6 significant digits and indexed out
    // of bounds for order > 9; this supports any order at full precision.
    double coeff = 1.0;
    for(std::size_t i = 0; i < order; ++i){
        coeff /= static_cast<double>(i + 1);

        //add the next order approximation to R and then compute the one for the
        //next round.  This will always compute one more step than asked for right
        //now, but it only returns the number of steps asked for.
        LHS("i,j") = LHS("i,j") + coeff * steps("i,j");
        steps = Scom(steps, X, S);

    }

    R = LHS;
}

/**
 * Tries backtrack line search along P by minimizing the variational functional f(x) = 1/2 x.A.x - x.b
 * corresponding to the A.x = b linear system.
 *
 * Here A.x is evaluated as (F.x.D - (F.x.D)^T) - 0.5*(S.x.Esymm - (S.x.Esymm)^T)
 * and b is Eanti, so f(x) is computed as dot(x, 0.5*A.x - b).
 *
 * All undocumented parameters are only used for recomputing gradients
 * @param P search direction
 * @param R gradient at the initial position
 * @param X current iterate; overwritten with the accepted point unless the
 *          search decides P is not a descent direction
 * @return nonzero when a step was taken (or the search was skipped/exhausted);
 *         0 only when P does not appear to be a descent direction, in which
 *         case X is left unchanged
 */
int backtrack_step(const ARRAY2 &P, const ARRAY2 &R,
                    const ARRAY2 &F, ARRAY2 &X,
                    const ARRAY2 &D, const ARRAY2 &Esymm,
                    const ARRAY2 &S, const ARRAY2 &Eanti){

  // make a backtrack line-search step along the direction P
  const double backtrack_factor = 0.1;  // fraction of P removed per backtrack
  const double decrease_factor = 0.1;   // sufficient-decrease (Armijo-style) constant
  const int max_iterations = 20;
  const double desired_value_accuracy = 1e-4; // this should be provided by the user

  std::deque<double> values;  // f at each trial point, reported at the end
  int acceptable=0;           // set once a trial step is accepted
  int descent=1;              // cleared when P stops looking like a descent direction
  int took_step=0;            // number of trial steps actually evaluated
  int using_step;

  // Fixed decrement (-0.1*P) subtracted from the step on each backtrack.
  TA::Array<double, 2> backtrack = (-1.0 * backtrack_factor) * P("i,j");
  TA::Array<double, 2> step = P;
  const double Pnorm = TA::expressions::norm2(P("i,j"));

  double step_R = TA::expressions::dot(step("i,j"), R("i,j"));
  std::cout << "backtrack: step_R = " << step_R << std::endl;

  // check if sufficient decrease is possible within the required accuracy
  if (desired_value_accuracy >= fabs(decrease_factor * step_R)) {
    std::cout << "Sufficient decrease cannot be achieved for the requested value accuracy. Skipping line search." << std::endl;
    X = X("i,j") + step("i,j");
    return 1;
  }

  // evaluate f(X)
  TA::Array<double, 2> FXD = F("i,n") * X("n,m") * D("m,j");
  TA::Array<double, 2> SXEs = S("i,n") * X("n,m") * Esymm("m,j");
  // R0 = 0.5*A.X - b, so dot(X, R0) reproduces f(X) = 1/2 X.A.X - X.b.
  TA::Array<double, 2> R0 = 0.5 * (FXD("i,j") - FXD("j,i"))
                            - 0.25 * (SXEs("i,j") - SXEs("j,i")) - Eanti("i,j");
  double f0 = TA::expressions::dot(X("i,j"), R0("i,j"));

  // evaluate f(X+dX)
  ARRAY2 X1 = X("i,j") + step("i,j");
  TA::Array<double, 2> FXD_1 = F("i,n") * X1("n,m") * D("m,j");
  TA::Array<double, 2> SXEs_1 = S("i,n") * X1("n,m") * Esymm("m,j");
  TA::Array<double, 2> R1 = 0.5 * (FXD_1("i,j") - FXD_1("j,i"))
                            - 0.25 * (SXEs_1("i,j") - SXEs_1("j,i")) - Eanti("i,j");
  double f1 = TA::expressions::dot(X1("i,j"), R1("i,j"));

  std::cout << "backtrack: f0 = " << f0 << " f1 = " << f1 << std::endl;

  // check if line search is needed
  if (f1 <= f0 + decrease_factor * step_R) {
    std::cout << "Unscaled initial step yields sufficient decrease." << std::endl;
    X = X1;
    return 1;
  }

  std::cout << "Unscaled initial step does not yield a sufficient decrease, starting a backtrack line search."
    << std::endl;

  // perform a simple backtrack: shrink the step by 0.1*P each iteration until
  // a trial is accepted or the direction is judged non-descent.
  values.push_back(f1);
  for (int i = 0; i < max_iterations && !acceptable && descent; ++i) {

    step = step("i,j") + backtrack("i,j");

    // Only keep trying while the step retains at least 10% of P's norm;
    // shrinking past that is taken to mean P is not a descent direction.
    if (TA::expressions::norm2(step("i,j"))
        >= 0.1 * Pnorm) {

      ++took_step;

      // Re-evaluate f at the shrunken trial point (these locals shadow the
      // pre-loop FXD_1/SXEs_1 intentionally).
      X1 = X("i,j") + step("i,j");
      TA::Array<double, 2> FXD_1 = F("i,n") * X1("n,m") * D("m,j");
      TA::Array<double, 2> SXEs_1 = S("i,n") * X1("n,m") * Esymm("m,j");
      f1 = TA::expressions::dot(X1("i,j"), 0.5 * (FXD_1("i,j") - FXD_1("j,i"))
                                - 0.25 * (SXEs_1("i,j") - SXEs_1("j,i")) - Eanti("i,j"));
      step_R = TA::expressions::dot(step("i,j"), R("i,j"));

      if (f1 < f0 + decrease_factor * step_R) {
        std::cout << "backtrack: f0 = " << f0 << " f1 = " << f1 << std::endl;
        std::cout << "Backtrack " << i + 1
            << " yields a sufficient decrease." << std::endl;
        acceptable = 1;
        using_step = i + 1;
      }
      else if (values.back() < f1) {
        // Value went up relative to the previous trial: keep the previous
        // step instead (using_step = i, one less than took_step).
        std::cout << "Backtrack " << i + 1
            << " increases value; terminating search." << std::endl;
        acceptable = 1;
        using_step = i;
      }
      else {
        std::cout << "Backtrack " << i + 1
            << " does not yield a sufficient decrease." << std::endl;
        using_step = i + 1;
      }

      values.push_back(f1);
    }
    else {
      std::cout
          << "Search direction does not appear to be a descent direction;"
          << " terminating search." << std::endl;
      descent = 0;
    }
  }


  if ( !acceptable && descent ) {
    ExEnv::out0() << indent <<
      "Maximum number of backtrack iterations has been exceeded." << std::endl;
    acceptable = 1;
  }

  // Report the value at the initial step and at every backtrack trial.
  for(int i=0; i <= took_step; ++i) {
    if(i==0) std::cout << "initial step    " << " value: ";
    else std::cout << "backtrack step " << i << " value: ";
    std::cout << scprintf("%15.10lf", values.front()) << std::endl;
    values.pop_front();
  }

  if(descent) {
    std::cout << "Using step " << using_step << std::endl;

    // use next to last step if value went up
    // (subtracting backtrack, which is -0.1*P, undoes the final shrink)
    if( using_step != took_step ) {
      X = X1("i,j") - backtrack("i,j");
    }
    else
      X = X1;
  }
  else {
    // leave X unchanged
  }

  // returning 0 only if search direction is not descent direction
  return acceptable;
}

/// Solves Eanti = F.X.D - (F.X.D)^T - 0.5*( S.X.Esymm - (S.X.Esymm)^T ) for
/// the unknown X with a modified conjugate-gradient method.
///
/// @param F        Fock matrix
/// @param X        on entry the initial guess (callers pass zero); on exit the
///                 approximate solution
/// @param D        metric-transformed density (S.R.S at the call site)
/// @param Esymm    symmetric combination F.R.S + S.R.F
/// @param S        overlap matrix
/// @param Eanti    right-hand side of the linear system
/// @param max_iter maximum number of CG iterations
void
conjgradmat(const ARRAY2 &F, ARRAY2 &X, const ARRAY2 &D,
            const ARRAY2 &Esymm, const ARRAY2 &S,
            const ARRAY2 &Eanti, const std::size_t max_iter){
    //Solve problem Eanti = F.X.D + D.X.F - 0.5( S.X.Esymm + Esymm.X.S)
    //Using a modified conjugate gradient method.

    //Timer
    double t0 = madness::wall_time();

    // X is taken to be zero on entry, so the initial F.X.D, S.X.Esymm and RHS
    // contractions all vanish; alias them to X instead of computing them.
    ARRAY2 FXD = X; // = F("i,n") * X("n,m") * D("m,j") but X = 0
    ARRAY2 SXEs = X; // = S("i,n") * X("n,m") * Esymm("m,j") but X = 0
    ARRAY2 RHS = X; // (FXD("i,j") - FXD("j,i"))
    // - 0.5 * (SXEs("i,j") - SXEs("j,i")); both of these are 0

    // Gradient = -Residual
    TA::Array<double, 2> R = Eanti; // (Eanti("i,j") - RHS("i,j")); RHS = 0

    double tf = madness::wall_time();
    // BUGFIX: madness::wall_time() returns wall-clock seconds as a double, so
    // the elapsed time is just the difference.  The old code divided by
    // CLOCKS_PER_SEC, which only applies to std::clock() ticks, and therefore
    // under-reported the time by orders of magnitude.
    std::cout << "Residual time = " << (tf - t0)
                << std::endl;

    // Search direction is the opposite of gradient
    TA::Array<double, 2> P_i = R;

#if 0
    // try a line search step
    std::cout << "residual norm before line search = " << TA::expressions::norm2(R("i,j")) << std::endl;
    backtrack_step((0.1)*P_i("i,j"), (-1)*R("i,j"),
                   F, X, D, Esymm, S, Eanti);
    {
        TA::Array<double, 2> FXD = F("i,n") * X("n,m") * D("m,j");
        TA::Array<double, 2> SXEs = S("i,n") * X("n,m") * Esymm("m,j");

        // Residual (-gradient)
        TA::Array<double, 2> R = Eanti("i,j") - (FXD("i,j") - FXD("j,i"))
                                      + 0.5 * (SXEs("i,j") - SXEs("j,i"));
        std::cout << "residual norm after line search = " << TA::expressions::norm2(R("i,j")) << std::endl;
    }
    return;
#endif

    //Scalar product of the residual
    double Rnorm2 = TA::expressions::norm2(R("i,j"));
    double Rsold = Rnorm2 * Rnorm2;

    //Iterator and maximum number of iterations.
    std::size_t iter = 0;
    std::size_t maxiter = max_iter;

    // Only used by the disabled step-angle diagnostic below.
    TA::Array<double, 2> d_old = P_i;

  do{
    ARRAY2 FPD = F("i,n") * P_i("n,m") * D("m,j");
    ARRAY2 SPEs = S("i,n") * P_i("n,m") * Esymm("m,j");

    //Precompute what traditionally is A * p_i in the regular cg method
    ARRAY2 Ap_i = (FPD("i,j") - FPD("j,i") -
        0.5 * (SPEs("i,j") - SPEs("j,i")) );

    //Forming alpha, the optimized length to travel along p_i
    double alpha = Rsold / TA::expressions::dot(P_i("i,j"), Ap_i("i,j"));
    //std::cout << "alpha = " << alpha << std::endl;

    // the new step
    ARRAY2 d = alpha * P_i("i,j");

    //Updating the unknown
    X("i,j") = X("i,j") +  d("i,j");

    // compute the angle between the previous and this step
#if 0

        if (iter > 0) {
            double theta;
            {
                theta = std::acos( TA::expressions::dot(d_old("i,j"), d("i,j")) /
                                   ( TA::expressions::norm2(d_old("i,j")) * TA::expressions::norm2(d("i,j")))
                );
                // convert to degrees
                theta *= 180.0 / M_PI;
            }
            std::cout << "theta = " << theta << std::endl;
        }
        // remember the old direction
        d_old = d;
#endif

        //Calculating the new residual
        R("i,j") = R("i,j") - alpha * Ap_i("i,j");
        double Rsnew = TA::expressions::norm2(R("i,j"));
        //std::cout << "||R||_2 = " << Rsnew << std::endl;
        Rsnew = Rsnew * Rsnew;

        //Finding a new search direction
        P_i("i,j") = R("i,j") + (Rsnew/Rsold) * P_i("i,j");
        Rsold = Rsnew;

        ++iter;
    }while( std::sqrt(Rsold) >= 1e-5 && iter < maxiter);
    if(iter == maxiter){
        std::cout << "Convergence Factor in Conjgradmat = " << std::sqrt(Rsold) <<
                        std::endl;
        //conjgradmat(F, X, D, Esymm, S, Eanti);
        std::cout << "The maximum number of iterations was reached \n";
    }
    F.get_world().gop.fence();

}

void
nonlinear_conjugate_gradient(const ARRAY2 &F, const ARRAY2 &D,
                             const ARRAY2 &S){
  // Residual = -gradient of E
  ARRAY2 R = - 8 * ( S("i,x") * D("x,y") * F("y,j") -
      F("i,x") * D("x,y") * S("y,j") );

  // First guess direction
  ARRAY2 D_i = R;
}

void Density_Update(ARRAY2 &R, const ARRAY2 &S,
                   const ARRAY2 &F, std::size_t iter, std::size_t rotation_order = 2){
  // Second-order optimization of the one-electron density matrix R for the
  // Hartree-Fock energy.  The equations being solved are
  //   F.Rn.S - S.Rn.F = F.X.S.Rn.S + S.Rn.S.X.F - 0.5( S.X.Esymm + Esymm.X.S)
  //   Esymm = F.Rn.S + S.Rn.F
  // For a more thorough explanation see p. 477 of Molecular
  // Electronic-Structure Theory.

  // F.R.S enters both right-hand sides, so build it once; its antisymmetric
  // and symmetric combinations give Eanti and Esymm directly
  // (FRS(i,j) = -FRS(j,i) = -SRF(i,j)).
  ARRAY2 frs = F("i,n") * R("n,m") * S("m,j");
  ARRAY2 e_anti = frs("i,j") - frs("j,i");
  ARRAY2 e_symm = frs("i,j") + frs("j,i");

  // S.R.S is the metric-transformed density handed to the linear solver.
  ARRAY2 srs = S("i,n") * R("n,m") * S("m,j");

  // X is the antisymmetric generator that rotates R; start it at zero.
  ARRAY2 x_gen(S.get_world(), S.trange());
  x_gen.set_all_local(0.0);

  // Solve for X with the modified conjugate-gradient method.
  conjgradmat(F, x_gen, srs, e_symm, S, e_anti, iter * 20);

  // Clamp the step: if X grows too large, the purification step can no longer
  // bring R back to idempotency.
  double scale = 0.1/TA::expressions::norminf(x_gen("i,j"));
  std::cout << "scale = " << scale << std::endl;
  x_gen("i,j") = x_gen("i,j") * std::min(1.0,scale);

  x_gen.get_world().gop.fence();
  // Rotate the old density Rm into Rn using X.
  RotateR(R,x_gen,S,rotation_order);

  // Restore the valid-density-matrix conditions on Rn.
  Purify(R,S);
  R.get_world().gop.fence();
}
