/*
 * lbfgs.h
 *
 *  Created on: Mar 7, 2011
 *      Author: tqlong
 */

#ifndef LBFGS_H_
#define LBFGS_H_

#include "graddescent.h"

#include <cassert>
#include <cmath>
#include <iostream>
#include <vector>

/** Limited memory BFGS with Wolfe line search
 *  F should have
 *    typename variable_type
 *    variable_type initVariable()    <-- init zero variable
 *    variable_type currentVariable() <-- current iterate
 *    void setCurrentVariable(x)      <-- store result iterate
 *    double evaluateFunction(x)
 *    void evaluateGradient(x, grad)
 *    double evaluateFuncGrad(x, grad, diagHessian)
 *        <-- combines function + gradient evaluation (saves redundant computation);
 *            diagHessian is a variable_type* and may be NULL
 *  F::variable_type should have
 *    y.add(a, x)     <-- y = y + a*x
 *    y.dot(x)        <-- dot product <y,x>
 *    y.operator-(x)  <-- z = y - x
 *    y.operator*=(a) <-- scale in place
 *    y.mulElem(d)    <-- element-wise multiply with diagonal (inverse) Hessian d
 *                        (see bfgs_direction() below)
 */

template<typename F>
class LBFGS : public GradientDescentLineSearch<F>
{
public:
  typedef F                            function_type;
  typedef typename F::variable_type    variable_type;
  // NOTE(review): identifiers starting with a double underscore are reserved
  // to the implementation; kept unchanged here for source compatibility.
  typedef GradientDescentLineSearch<F> __Base;
  typedef typename __Base::optim_type  optim_type;
  typedef std::vector<variable_type>   vec_var_type;
protected:
  vec_var_type s_, y_;     // circular buffers: s_k = x_k - x_{k-1}, y_k = g_k - g_{k-1}
  variable_type old_x_, old_grad_;   // iterate and gradient of the previous iteration
  std::vector<double> r_;  // r_k = 1/<s_k, y_k> (curvature scaling, size m_)
  int m_, current_;        // m_ = memory size; current_ = index of newest (s,y) pair
                           //   current_ == -2: no iterate seen yet
                           //   current_ == -1: one iterate seen, no pair stored yet
public:
  /** Construct an optimizer over f with default memory size m_ = 3. */
  LBFGS(function_type& f)
  : __Base(f), old_x_(f.initVariable()), old_grad_(f.initVariable()),
    m_(3), current_(-2)
  {
    r_ = std::vector<double>(this->m_);
  }

  /** Read optimizer parameters from a parameter set.
   *  Recognized keys: "iter" (max iterations), "mem" (memory size),
   *  "c0" (Armijo constant), "c1" (curvature constant),
   *  "eta" (learning rate of the gradient-descent fallback).
   *  Resizes the curvature buffer r_ to the (possibly new) memory size. */
  void setParams(const paramset_type& p)
  {
    __Base::params_ = p;
    if (__Base::params_.contains("iter")) this->maxiter_ = __Base::params_.get("iter");  // maximum number of iterations
    if (__Base::params_.contains("mem")) this->m_ = __Base::params_.get("mem");          // memory size
    if (__Base::params_.contains("c0")) this->c0_ = __Base::params_.get("c0");           // armijo parameter
    if (__Base::params_.contains("c1")) this->c1_ = __Base::params_.get("c1");           // curvature parameter
    if (__Base::params_.contains("eta")) this->eta_ = __Base::params_.get("eta");        // learning rate (gradient descent)
    r_ = std::vector<double>(this->m_);
  }

  /** Evaluate function value and gradient at x in one call.
   *  When maximizing, negates f, grad, and (if requested) the diagonal
   *  Hessian so the main loop can always minimize.
   *  @param diagHessian optional output (may be NULL). */
  double fevalgrad(const variable_type& x, variable_type& grad, variable_type* diagHessian)
  {
    double f = this->f_.evaluateFuncGrad(x, grad, diagHessian);
    if (this->type_ == __Base::MAXIMIZE) {
      f = -f;
      grad *= -1.0;
      if (diagHessian)
        (*diagHessian) *= -1.0;
    }
    return f;
  }

  /** Run L-BFGS starting from the function's current variable; the result
   *  is written back via setCurrentVariable(). Terminates on max iterations,
   *  a failed line search (step below STEPSIZE_TOLERANCE, presumably defined
   *  in graddescent.h), or a small gradient/direction norm. */
  void optimize(optim_type type = __Base::MINIMIZE)
  {
    this->type_ = type;
    std::cerr << "maxiter = " << this->maxiter_
              << " m = " << m_
              << " c0 = " << this->c0_
              << " c1 = " << this->c1_
              << "\n";

    variable_type x = this->f_.currentVariable();
    double f0 = this->fval(x), f = f0;
    s_.clear(); y_.clear(); current_ = -2;  // reset limited memory for a fresh run

    std::cerr << "Init f0 = " << f0 << "\n";

    for (int iter = 0; iter < this->maxiter_; iter++) {
      variable_type grad = this->f_.initVariable(),
                    newx = this->f_.initVariable(),
                    dir = this->f_.initVariable(),
                    diagHessian = this->f_.initVariable();
      f = fevalgrad(x, grad, &diagHessian);                 // calculate gradient and function value
      bfgs_store(x, grad);                                  // save new information to memory
      bfgs_direction(grad, diagHessian, dir);               // calculate direction (lbfgs)
      // BUGFIX: dir_norm previously recomputed grad.dot(grad), so the
      // dir_norm stopping test below never looked at the direction at all.
      double grad_norm = ::sqrt(grad.dot(grad)),
             dir_norm  = ::sqrt(dir.dot(dir));
      std::cerr << "iter " << iter << " f = " << f << " gradnorm = " << grad_norm
          << " dirnorm = " << dir_norm << " ";
      // this-> is required: lineSearch is a member of the dependent base class
      // and is not found by unqualified lookup inside a template.
      double alpha = this->lineSearch(x, f, dir, grad, newx, f);  // line search on direction -grad
      x = newx;
      std::cerr << " alpha " << alpha << "\n";
      if (alpha < STEPSIZE_TOLERANCE) {  // line search failed: stop
//        x.add(-this->eta_/(iter+1), grad);
//        std::cerr << "alpha small try gradient descent " << this->eta_/(iter+1) << "\n";
        break;
      }
      if (grad_norm < 1e-6 || dir_norm < 1e-6)  // converged
        break;
    }
    this->f_.setCurrentVariable(x);
  }

  /** Save the newest (s, y) pair and its curvature 1/<s,y> to the limited
   *  memory. The first call only records (x, grad) as the previous iterate;
   *  subsequent calls append (while size < m_) or overwrite the oldest slot
   *  in circular fashion. */
  void bfgs_store(const variable_type &x, const variable_type &grad)
  {
    if (current_ == -2) {  // first time: nothing to difference against yet
      current_ = -1;
    }
    else {
      if ((int)s_.size() < m_) {           // memory not full: grow
        s_.push_back(x - old_x_);
        y_.push_back(grad - old_grad_);
        current_ = s_.size() - 1;
      }
      else {                               // memory full: overwrite oldest slot
        current_ = (current_+1) % m_;
        s_[current_] = x - old_x_;
        y_[current_] = grad - old_grad_;
      }
      double sy = s_[current_].dot(y_[current_]);
      // Reject degenerate curvature: NaN (0/0 upstream) and exactly zero,
      // which would make r_ infinite. The original check only caught NaN.
      assert(sy == sy && sy != 0.0);
      r_[current_] = 1.0/sy;
    }
    old_x_ = x;
    old_grad_ = grad;
//    std::cerr << "mem size = " << s_.size() << "\n";
    assert(s_.size() == y_.size());
  }

  /** Compute the L-BFGS search direction via the standard two-loop
   *  recursion, walking the memory newest-to-oldest (backward loop),
   *  applying the diagonal initial inverse-Hessian approximation, then
   *  oldest-to-newest (forward loop). On the first iteration (empty
   *  memory) dir is simply the gradient. */
  void bfgs_direction(const variable_type &grad, const variable_type &diagHessian, variable_type &dir)
  {
    dir = grad;
//    std::cerr << "--bfgs dirnorm = " << ::sqrt(dir.dot(dir)) << "\n";
    if (s_.size() == 0)  {// first time, use gradient direction
//      std::cerr << "First time\n";
      return;
    }
    // later time
    int idx = current_;
    int memsize = (int)s_.size() < m_ ? s_.size() : m_;
    std::vector<double> a(m_), b(m_);

    // backward loop: newest pair first
    for (int k = 0; k < memsize; k++) {
      a[idx] = r_[idx] * dir.dot(s_[idx]);
      dir.add(-a[idx], y_[idx]);
      idx--;
      if (idx < 0) idx = s_.size()-1;  // wrap around the circular buffer
//      std::cerr << "--bfgs1 dirnorm = " << ::sqrt(dir.dot(dir)) << "\n";
    }

//    variable_type d = dir;
    dir.mulElem(diagHessian);      // multiply element-wise with inverse of second derivative
                                   // if second derivative is not known, just do nothing
                                   // which is equivalent to multiplication with identity matrix
//    std::cerr << "--bfgs3 dirnorm = " << ::sqrt(dir.dot(dir)) << "\n";
//    d = d - dir;
//    std::cerr << "diff = " << ::sqrt(d.dot(d)) << "\n";

    // forward loop starts at the oldest pair: slot 0 while the memory is
    // still growing, otherwise the slot just after the newest one.
    idx = memsize < m_ ? 0 : current_+1;
    if (idx >= m_) idx = 0;

    for (int k = 0; k < memsize; k++) {
      b[idx] = r_[idx]* dir.dot(y_[idx]);
      dir.add(a[idx]-b[idx], s_[idx]);
      idx++;
      if (idx >= m_) idx = 0;
//      std::cerr << "--bfgs2 dirnorm = " << ::sqrt(dir.dot(dir)) << "\n";
    }
  }
};

#endif /* LBFGS_H_ */
