/*
 * graddescent.h
 *
 *  Created on: Mar 6, 2011
 *      Author: tqlong
 */

#ifndef GRADDESCENT_H_
#define GRADDESCENT_H_

#include <cassert>
#include <iostream>
#include <map>
#include <string>

#define STEPSIZE_TOLERANCE 1e-16

class paramset_type : public std::map<std::string, double>
{
public:
  /// True when a parameter named pname has been set.
  bool contains(const std::string& pname) const
  {
    return this->count(pname) != 0;
  }

  /// Value of parameter pname; the parameter must exist
  /// (checked by assert in debug builds).
  double get(const std::string& pname) const
  {
    const_iterator it = this->find(pname);
    assert(it != this->end());
    return it->second;
  }
};

/** Gradient descent algorithm
 *  F should provide
 *    typename variable_type
 *    variable_type initVariable(), variable_type currentVariable(),
 *    void setCurrentVariable(x),
 *    double evaluateFunction(x), void evaluateGradient(x, grad)
 *  F::variable_type should support
 *    y.add(a, x) <-- y = y + a*x
 *    y.dot(x)    <-- y'x
 *    y *= a      <-- scaling (used when maximizing)
 */
template<typename F>
class GradientDescent
{
public:
  typedef F                           function_type;
  typedef typename F::variable_type   variable_type;
  enum optim_type { MINIMIZE = 0, MAXIMIZE };
protected:
  function_type& f_;      // objective being optimized (not owned; must outlive this object)
  optim_type type_;       // current optimization direction
  int maxiter_;           // maximum number of iterations
  double eta_;            // base learning rate (decayed as eta/(iter+1))
  paramset_type params_;  // raw parameter set stored by setParams()
public:
  /** @param f objective to optimize; a reference is kept, so f must outlive this object. */
  GradientDescent(function_type& f)
    : f_(f), type_(MINIMIZE), maxiter_(10), eta_(1.0) {}
    // type_ is initialized here so fval()/fgrad() are well-defined
    // even when called before optimize() (it was previously uninitialized)

  /** Read algorithm parameters: "iter" (max iterations), "eta" (learning rate). */
  void setParams(const paramset_type& p)
  {
    params_ = p;
    if (params_.contains("iter")) this->maxiter_ = static_cast<int>(params_.get("iter"));  // maximum number of iterations
    if (params_.contains("eta")) this->eta_ = params_.get("eta");                          // learning rate
  }

  /** Objective value at x, negated for MAXIMIZE so the core loop always minimizes. */
  double fval(const variable_type& x) {
    double f = f_.evaluateFunction(x);
    return type_==MINIMIZE ? f : -f;
  }

  /** Gradient of the (possibly negated) objective at x into grad. */
  void fgrad(const variable_type& x, variable_type& grad) {
    f_.evaluateGradient(x, grad);
    if (type_ == MAXIMIZE) grad *= -1.0;  // descend on -f to maximize f
  }

  /** Run maxiter_ iterations of x <- x - (eta/(iter+1))*grad(x),
   *  starting from f_.currentVariable(); the final iterate is stored
   *  back via f_.setCurrentVariable(). Progress is logged to stderr. */
  void optimize(optim_type type = MINIMIZE)
  {
    type_ = type;
    std::cerr << "maxiter = " << maxiter_
              << " eta = " << eta_ << "\n";

    variable_type x = f_.currentVariable();
    double f0 = fval(x);

    std::cerr << "Init f0 = " << f0 << "\n";

    for (int iter = 0; iter < maxiter_; iter++) {
      variable_type grad = f_.initVariable();                 // init and
      fgrad(x, grad);                                         // compute gradient
      x.add(-eta_/(iter+1), grad);                            // x += (-eta_/(iter+1))*grad; 1/t step-size decay
      double f = fval(x);                                     // calculate new function value
      std::cerr << "iter " << iter << " f = " << f << "\n";
    }
    f_.setCurrentVariable(x);
  }
};

/** Gradient descent with a backtracking line search enforcing the
 *  Wolfe conditions: sufficient decrease (Armijo, parameter "c0")
 *  and curvature (parameter "c1").
 *  Requirements on F and F::variable_type are the same as for
 *  GradientDescent<F>; in particular y.dot(x) is used here.
 */

template<typename F>
class GradientDescentLineSearch : public GradientDescent<F>
{
public:
  typedef F                           function_type;
  typedef typename F::variable_type   variable_type;
  typedef GradientDescent<F> __Base;
  typedef typename __Base::optim_type optim_type;
protected:
  double c0_, c1_;  // Armijo (sufficient decrease) and curvature constants
public:
  /** @param f objective to optimize; a reference is kept, so f must outlive this object. */
  GradientDescentLineSearch(function_type& f)
  : __Base(f), c0_(1e-4), c1_(0.9) {}

  /** Read parameters: "iter"/"eta" (handled by the base class),
   *  "c0" (Armijo constant), "c1" (curvature constant). */
  void setParams(const paramset_type& p)
  {
    __Base::setParams(p);  // stores p and reads "iter"/"eta"; avoids duplicating base bookkeeping
    if (__Base::params_.contains("c0")) this->c0_ = __Base::params_.get("c0");           // armijo parameter
    if (__Base::params_.contains("c1")) this->c1_ = __Base::params_.get("c1");           // curvature parameter
  }

  /** Iterate steepest descent with Wolfe line search from
   *  f_.currentVariable(); stops early when the accepted step size
   *  falls below STEPSIZE_TOLERANCE. The final iterate is stored back
   *  via f_.setCurrentVariable(). Progress is logged to stderr. */
  void optimize(optim_type type = __Base::MINIMIZE)
  {
    this->type_ = type;
    std::cerr << "maxiter = " << this->maxiter_
              << " c0 = " << c0_
              << " c1 = " << c1_
              << "\n";

    variable_type x = this->f_.currentVariable();
    double f0 = this->fval(x), f = f0;

    std::cerr << "Init f0 = " << f0 << "\n";

    for (int iter = 0; iter < this->maxiter_; iter++) {
      variable_type grad = this->f_.initVariable(),
                    newx = this->f_.initVariable();
      this->fgrad(x, grad);                                 // calculate gradient
      std::cerr << "iter " << iter << " f = " << f;
      double alpha = lineSearch(x, f, grad, grad, newx, f); // line search on direction -grad
      std::cerr << " alpha " << alpha << "\n";
      x = newx;
      if (alpha < STEPSIZE_TOLERANCE) break;                // no usable step found: converged/stalled
    }
    this->f_.setCurrentVariable(x);
  }

  /** Backtracking line search from x along direction -sign*md
   *  (sign flips to +md when -md turns out to be an ascent direction).
   *  @param x     current iterate
   *  @param f     objective value at x
   *  @param md    (negated) search direction; the step is x - alpha*sign*md
   *  @param grad  gradient at x (used for the directional derivative)
   *  @param newx  out: accepted new iterate
   *  @param newf  out: objective value at newx
   *  @return accepted step size alpha (0.0 when no acceptable step exists) */
  double lineSearch(const variable_type& x, double f,
      const variable_type& md, const variable_type& grad,
      variable_type& newx, double& newf)
  {
    double alpha = 1.0;                                   // hopefully, step size is 1
    variable_type newgrad = this->f_.initVariable();
    double dnew, dprod = -grad.dot(md);                   // directional derivative at x along -md
    double sign = 1.0;

    if (dprod > 0) {                                      // -md is an ascent direction: search along +md instead
      std::cerr << "ls -- Search direction maybe wrong \n";
      dprod = -dprod;
      sign = -1.0;
    }
    newx = x;
    newx.add(-alpha*sign, md);
    newf = this->fval(newx);
    this->fgrad(newx, newgrad);
    dnew = -newgrad.dot(md)*sign;                         // BUGFIX: apply sign here, matching the in-loop computation below

    double save_alpha = -1.0;                             // best step satisfying Armijo alone (fallback)
    double save_f = 0.0;
    while ((newf != newf /*isnan*/
            || newf > f + c0_*alpha*dprod                 // Armijo (sufficient decrease) violated
            || dnew < c1_*dprod) && alpha >= STEPSIZE_TOLERANCE) {  // or curvature condition violated
      if (newf <= f + c0_*alpha*dprod && save_alpha < 0) {
        save_alpha = alpha;                               // remember first Armijo-satisfying step as a fallback
        save_f = newf;
      }
      alpha /= 10.0;                                      // if not satisfied, scale step size
      newx = x;
      newx.add(-alpha*sign, md);                          // try new position
      newf = this->fval(newx);                            // calculate function value
      newgrad = this->f_.initVariable();
      this->fgrad(newx, newgrad);                         // and gradient
      dnew = -newgrad.dot(md)*sign;                       // directional derivative at new position
    }

    if (alpha < STEPSIZE_TOLERANCE) {                     // step became too small to be useful
      if (save_alpha > 0) {                               // fall back to the Armijo-only step
        alpha = save_alpha;
        newx = x;
        newx.add(-alpha*sign, md);
        newf = save_f;
      }
      else {                                              // no acceptable step at all: stay put
        newx = x;
        newf = f;
        alpha = 0.0;
      }
    }
    return alpha;
  }
};

#endif /* GRADDESCENT_H_ */
