#ifndef LBFGS_HPP
#define LBFGS_HPP

#include <Eigen/Eigen>
#include <algorithm>
#include <cmath>

namespace lbfgs {
/* -------------------------------------------------------------------------- */
/*                               Data Type Part                               */
/* -------------------------------------------------------------------------- */

/**
 * L-BFGS optimization parameters.
 */
/**
 * L-BFGS optimization parameters.
 */
struct lbfgs_parameter_t
{
  /**
   * The number of corrections to approximate the inverse hessian matrix.
   *  The L-BFGS routine stores the computation results of previous m
   *  iterations to approximate the inverse hessian matrix of the current
   *  iteration. This parameter controls the size of the limited memories
   *  (corrections). The default value is 8. Values less than 3 are
   *  not recommended. Large values will result in excessive computing time.
   */
  int mem_size = 8;

  /**
   * Epsilon for grad convergence test. DO NOT USE IT in nonsmooth cases!
   *  Set it to 0.0 and use past-delta-based test for nonsmooth functions.
   *  This parameter determines the accuracy with which the solution is to
   *  be found. A minimization terminates when
   *      ||g(x)||_inf / max(1, ||x||_inf) < g_epsilon,
   *  where ||.||_inf is the infinity norm. The default value is 1.0e-5.
   *  This should be greater than 1.0e-6 in practice because L-BFGS does
   *  not directly reduce first-order residual. It still needs the function
   *  value which can be corrupted by xtol when ||g|| is small.
   */
  double g_epsilon = 1.0e-5;

  /**
   * Distance for delta-based convergence test.
   *  This parameter determines the distance, in iterations, to compute
   *  the rate of decrease of the cost function. If the value of this
   *  parameter is zero, the library does not perform the delta-based
   *  convergence test. The default value is 3.
   */
  int past = 3;

  /**
   * Delta for convergence test.
   *  This parameter determines the minimum rate of decrease of the
   *  cost function. The library stops iterations when the following
   *  condition is met:
   *      |f' - f| / max(1, |f|) < delta,
   *  where f' is the cost value of past iterations ago, and f is
   *  the cost value of the current iteration.
   *  The default value is 1.0e-6.
   */
  double delta = 1.0e-6;

  /**
   * The maximum number of iterations.
   *  The lbfgs_optimize() function terminates a minimization process with
   *  ::LBFGSERR_MAXIMUMITERATION status code when the iteration count
   *  exceeds this parameter. Setting this parameter to zero continues a
   *  minimization process until a convergence or error. The default value
   *  is 0.
   */
  int max_iterations = 0;

  /**
   * The maximum number of trials for the line search.
   *  This parameter controls the number of function and gradients evaluations
   *  per iteration for the line search routine. The default value is 64.
   */
  int max_linesearch = 64;

  /**
   * The minimum step of the line search routine.
   *  The default value is 1.0e-20. This value need not be modified unless
   *  the exponents are too large for the machine being used, or unless the
   *  problem is extremely badly scaled (in which case the exponents should
   *  be increased).
   */
  double min_step = 1.0e-20;

  /**
   * The maximum step of the line search.
   *  The default value is 1.0e+20. This value need not be modified unless
   *  the exponents are too large for the machine being used, or unless the
   *  problem is extremely badly scaled (in which case the exponents should
   *  be increased).
   */
  double max_step = 1.0e+20;

  /**
   * A parameter to control the accuracy of the line search routine.
   *  This is the sufficient-decrease (Armijo) coefficient.
   *  The default value is 1.0e-4. This parameter should be greater
   *  than zero and smaller than 1.0.
   */
  double f_dec_coeff = 1.0e-4;

  /**
   * A parameter to control the accuracy of the line search routine.
   *  This is the curvature (Wolfe) coefficient.
   *  The default value is 0.9. If the function and gradient
   *  evaluations are inexpensive with respect to the cost of the
   *  iteration (which is sometimes the case when solving very large
   *  problems) it may be advantageous to set this parameter to a small
   *  value. A typical small value is 0.1. This parameter should be
   *  greater than the f_dec_coeff parameter and smaller than 1.0.
   */
  double s_curv_coeff = 0.9;

  /**
   * A parameter to ensure the global convergence for nonconvex functions.
   *  The default value is 1.0e-6. The parameter performs the so called
   *  cautious update for L-BFGS, especially when the convergence is
   *  not sufficient. The parameter must be positive but might as well
   *  be less than 1.0e-3 in practice.
   */
  double cautious_factor = 1.0e-6;

  /**
   * The machine precision for floating-point values. The default is 1.0e-16.
   *  This parameter must be a positive value set by a client program to
   *  estimate the machine precision.
   */
  double xtol = 1.0e-16;
};

/**
 * Return values of lbfgs_optimize().
 *  Roughly speaking, a negative value indicates an error.
 */
enum {
  /** L-BFGS reaches convergence. */
  LBFGS_CONVERGENCE = 0,
  /** L-BFGS satisfies stopping criteria. */
  LBFGS_STOP,

  /** Unknown error.
      NOTE: error codes below start at -1024 and each subsequent enumerator
      is one greater (i.e. -1023, -1022, ...); all remain negative. */
  LBFGSERR_UNKNOWNERROR = -1024,
  /** The iteration has been canceled by the monitor callback. */
  LBFGSERR_CANCELED,
  /** Logic error. */
  LBFGSERR_LOGICERROR,
  /** Invalid number of variables specified. */
  LBFGSERR_INVALID_N,
  /** Invalid parameter lbfgs_parameter_t::mem_size specified. */
  LBFGSERR_INVALID_MEMSIZE,
  /** Invalid parameter lbfgs_parameter_t::g_epsilon specified. */
  LBFGSERR_INVALID_GEPSILON,
  /** Invalid parameter lbfgs_parameter_t::past specified. */
  LBFGSERR_INVALID_TESTPERIOD,
  /** Invalid parameter lbfgs_parameter_t::delta specified. */
  LBFGSERR_INVALID_DELTA,
  /** Invalid parameter lbfgs_parameter_t::min_step specified. */
  LBFGSERR_INVALID_MINSTEP,
  /** Invalid parameter lbfgs_parameter_t::max_step specified. */
  LBFGSERR_INVALID_MAXSTEP,
  /** Invalid parameter lbfgs_parameter_t::f_dec_coeff specified. */
  LBFGSERR_INVALID_FDECCOEFF,
  /** Invalid parameter lbfgs_parameter_t::s_curv_coeff specified. */
  LBFGSERR_INVALID_SCURVCOEFF,
  /** Invalid parameter lbfgs_parameter_t::xtol specified. */
  LBFGSERR_INVALID_MACHINEPREC,
  /** Invalid parameter lbfgs_parameter_t::max_linesearch specified. */
  LBFGSERR_INVALID_MAXLINESEARCH,
  /** The function value became NaN or Inf. */
  LBFGSERR_INVALID_FUNCVAL,
  /** The line-search step became smaller than lbfgs_parameter_t::min_step. */
  LBFGSERR_MINIMUMSTEP,
  /** The line-search step became larger than lbfgs_parameter_t::max_step. */
  LBFGSERR_MAXIMUMSTEP,
  /** Line search reaches the maximum, assumptions not satisfied or precision
     not achievable.*/
  LBFGSERR_MAXIMUMLINESEARCH,
  /** The algorithm routine reaches the maximum number of iterations. */
  LBFGSERR_MAXIMUMITERATION,
  /** Relative search interval width is at least lbfgs_parameter_t::xtol. */
  LBFGSERR_WIDTHTOOSMALL,
  /** A logic error (negative line-search step) occurred. */
  LBFGSERR_INVALIDPARAMETERS,
  /** The current search direction increases the cost function value. */
  LBFGSERR_INCREASEGRADIENT,
  /** The line-search step went out of the interval of uncertainty. */
  LBFGSERR_OUTOFINTERVAL,
  /** A logic error occurred; alternatively, the interval of uncertainty
  became too small. */
  LBFGSERR_INCORRECT_TMINMAX,
  /** A rounding error occurred; alternatively, no line-search step
  satisfies the sufficient decrease and curvature conditions. */
  LBFGSERR_ROUNDING_ERROR,

};

/**
 * Callback interface to provide cost function and gradient evaluations.
 *
 *  The lbfgs_optimize() function calls this function to obtain the values of cost
 *  function and its gradients when needed. A client program must implement
 *  this function to evaluate the values of the cost function and its
 *  gradients, given current values of variables.
 *
 *  @param  instance    The user data sent for lbfgs_optimize() function by the client.
 *  @param  x           The current values of variables.
 *  @param  g           The gradient vector. The callback function must compute
 *                      the gradient values for the current variables.
 *  @retval double      The value of the cost function for the current variables.
 */
typedef double (*lbfgs_evaluate_t)(void *instance,
                                   const Eigen::VectorXd &x,
                                   Eigen::VectorXd &g);

/**
 * Callback interface to provide an upper bound at the beginning of the current line search.
 *
 *  The lbfgs_optimize() function calls this function to obtain the value of the
 *  upper bound of the stepsize to search in, provided with the beginning values of
 *  variables before the line search, and the current step vector (can be descent direction).
 *  A client program can implement this function for more efficient line search. Any step
 *  larger than this bound should not be considered. For example, it has a very large or even
 *  inf function value. Note that the function value at the provided bound should be FINITE!
 *  If it is not used, just set it nullptr.
 *
 *  @param  instance    The user data sent for lbfgs_optimize() function by the client.
 *  @param  xp          The values of variables before current line search.
 *  @param  d           The step vector. It can be the descent direction.
 *  @retval double      The upper bound of the step in current line search routine,
 *                      such that (stpbound * d) is the maximum reasonable step.
 */
typedef double (*lbfgs_stepbound_t)(void *instance,
                                    const Eigen::VectorXd &xp,
                                    const Eigen::VectorXd &d);

/**
 * Callback interface to monitor the progress of the minimization process.
 *
 *  The lbfgs_optimize() function calls this function for each iteration. Implementing
 *  this function, a client program can store or display the current progress
 *  of the minimization process. If it is not used, just set it nullptr.
 *
 *  @param  instance    The user data sent for lbfgs_optimize() function by the client.
 *  @param  x           The current values of variables.
 *  @param  g           The current gradient values of variables.
 *  @param  fx          The current value of the cost function.
 *  @param  step        The line-search step used for this iteration.
 *  @param  k           The iteration count.
 *  @param  ls          The number of evaluations called for this iteration.
 *  @retval int         Zero to continue the minimization process. Returning a
 *                      non-zero value will cancel the minimization process.
 */
typedef int (*lbfgs_progress_t)(void *instance,
                                const Eigen::VectorXd &x,
                                const Eigen::VectorXd &g,
                                const double fx,
                                const double step,
                                const int k,
                                const int ls);

/**
 * Callback data struct.
 *  Bundles the opaque user-instance pointer with the three user callbacks;
 *  it is forwarded to every callback invocation.
 */
struct callback_data_t
{
  void *instance = nullptr;                   /* Opaque user data passed to each callback. */
  lbfgs_evaluate_t proc_evaluate = nullptr;   /* Cost/gradient evaluation; must be set by the client. */
  lbfgs_stepbound_t proc_stepbound = nullptr; /* Optional stepsize upper-bound callback (may stay nullptr). */
  lbfgs_progress_t proc_progress = nullptr;   /* Optional progress monitor (may stay nullptr). */
};

/* -------------------------------------------------------------------------- */
/*                               Arithmetic Part                              */
/* -------------------------------------------------------------------------- */

/* Small arithmetic helper macros.
   Arguments are fully parenthesized to preserve precedence; note that each
   argument may be evaluated more than once, so avoid side effects. */
#define MIN_LBFGS(a, b)        ((a) <= (b) ? (a) : (b))
#define MAX_LBFGS(a, b)        ((a) >= (b) ? (a) : (b))
/* FIX: the original definition carried a trailing semicolon, which made the
   macro expand to a statement rather than an expression (it only worked in
   the minimizer macros below by producing a harmless empty statement).
   Without the semicolon it is a pure expression, usable in any context;
   existing call sites of the form `s = MAX3_LBFGS(p, q, r);` are unchanged. */
#define MAX3_LBFGS(a, b, c)    MAX_LBFGS(MAX_LBFGS((a), (b)), (c))
/* True iff x and y have strictly opposite signs (y must be nonzero,
   since it is normalized by fabs(y)). */
#define F_SIGNDIFF_LBFGS(x, y) ((x) * ((y) / fabs((y))) < 0.)

/**
 * Define the local variables for computing minimizers.
 *  These scratch doubles are shared by the CUBIC_MINIMIZER*_LBFGS and
 *  QUAD_MINIMIZER*_LBFGS macros below; expand this once per scope that
 *  uses any of them.
 */
#define USES_MINIMIZER_LBFGS \
  double a, d, gamma, theta, p, q, r, s;

/**
 * Find a minimizer of an interpolated cubic function.
 *  Requires USES_MINIMIZER_LBFGS to be expanded in the enclosing scope.
 *  @param  cm      The minimizer of the interpolated cubic.
 *  @param  u       The value of one point, u.
 *  @param  fu      The value of f(u).
 *  @param  du      The value of f'(u).
 *  @param  v       The value of another point, v.
 *  @param  fv      The value of f(v).
 *  @param  dv      The value of f'(v).
 */
#define CUBIC_MINIMIZER_LBFGS(cm, u, fu, du, v, fv, dv) \
  d = (v) - (u);                                        \
  theta = ((fu) - (fv)) * 3 / d + (du) + (dv);          \
  p = fabs(theta);                                      \
  q = fabs(du);                                         \
  r = fabs(dv);                                         \
  s = MAX3_LBFGS(p, q, r);                              \
  a = theta / s;                                        \
  gamma = s * sqrt(a * a - ((du) / s) * ((dv) / s));    \
  if ((v) < (u))                                        \
    gamma = -gamma;                                     \
  p = gamma - (du) + theta;                             \
  q = gamma - (du) + gamma + (dv);                      \
  r = p / q;                                            \
  (cm) = (u) + r * d;

/**
 * Find a minimizer of an interpolated cubic function.
 *  Requires USES_MINIMIZER_LBFGS to be expanded in the enclosing scope.
 *  Unlike CUBIC_MINIMIZER_LBFGS, the discriminant is clamped at zero and
 *  the result falls back to [xmin, xmax] when the cubic has no interior
 *  minimizer in the search direction.
 *  @param  cm      The minimizer of the interpolated cubic.
 *  @param  u       The value of one point, u.
 *  @param  fu      The value of f(u).
 *  @param  du      The value of f'(u).
 *  @param  v       The value of another point, v.
 *  @param  fv      The value of f(v).
 *  @param  dv      The value of f'(v).
 *  @param  xmin    The minimum value.
 *  @param  xmax    The maximum value.
 */
#define CUBIC_MINIMIZER2_LBFGS(cm, u, fu, du, v, fv, dv, xmin, xmax) \
  d = (v) - (u);                                                     \
  theta = ((fu) - (fv)) * 3 / d + (du) + (dv);                       \
  p = fabs(theta);                                                   \
  q = fabs(du);                                                      \
  r = fabs(dv);                                                      \
  s = MAX3_LBFGS(p, q, r);                                           \
  a = theta / s;                                                     \
  gamma = s * sqrt(MAX_LBFGS(0, a * a - ((du) / s) * ((dv) / s)));   \
  if ((u) < (v))                                                     \
    gamma = -gamma;                                                  \
  p = gamma - (dv) + theta;                                          \
  q = gamma - (dv) + gamma + (du);                                   \
  r = p / q;                                                         \
  if (r < 0. && gamma != 0.) {                                       \
    (cm) = (v)-r * d;                                                \
  }                                                                  \
  else if (d > 0) {                                                  \
    (cm) = (xmax);                                                   \
  }                                                                  \
  else {                                                             \
    (cm) = (xmin);                                                   \
  }

/**
 * Find a minimizer of an interpolated quadratic function.
 *  Interpolates through f(u), f'(u) and f(v).
 *  Requires USES_MINIMIZER_LBFGS to be expanded in the enclosing scope.
 *  @param  qm      The minimizer of the interpolated quadratic.
 *  @param  u       The value of one point, u.
 *  @param  fu      The value of f(u).
 *  @param  du      The value of f'(u).
 *  @param  v       The value of another point, v.
 *  @param  fv      The value of f(v).
 */
#define QUAD_MINIMIZER_LBFGS(qm, u, fu, du, v, fv) \
  a = (v) - (u);                                   \
  (qm) = (u) + (du) / (((fu) - (fv)) / a + (du)) / 2 * a;

/**
 * Find a minimizer of an interpolated quadratic function.
 *  Secant form: interpolates through f'(u) and f'(v) only.
 *  Requires USES_MINIMIZER_LBFGS to be expanded in the enclosing scope.
 *  @param  qm      The minimizer of the interpolated quadratic.
 *  @param  u       The value of one point, u.
 *  @param  du      The value of f'(u).
 *  @param  v       The value of another point, v.
 *  @param  dv      The value of f'(v).
 */
#define QUAD_MINIMIZER2_LBFGS(qm, u, du, v, dv) \
  a = (u) - (v);                                \
  (qm) = (v) + (dv) / ((dv) - (du)) * a;

/* -------------------------------------------------------------------------- */
/*                                 L-BFGS Part                                */
/* -------------------------------------------------------------------------- */

/**
 * Update a safeguarded trial value and interval for line search.
 *
 *  The parameter x represents the step with the least function value.
 *  The parameter t represents the current step. This function assumes
 *  that the derivative at x is negative in the direction of the step,
 *  i.e. the function decreases from x along the step direction.
 *  If the bracket is set to true, the minimizer has been bracketed in
 *  an interval of uncertainty with endpoints between x and y.
 *
 *  @param  x       The reference to the value of one endpoint.
 *  @param  fx      The reference to the value of f(x).
 *  @param  dx      The reference to the value of f'(x).
 *  @param  y       The reference to the value of another endpoint.
 *  @param  fy      The reference to the value of f(y).
 *  @param  dy      The reference to the value of f'(y).
 *  @param  t       The reference to the value of the trial value, t.
 *  @param  ft      The reference to the value of f(t).
 *  @param  dt      The reference to the value of f'(t).
 *  @param  tmin    The minimum value for the trial value, t.
 *  @param  tmax    The maximum value for the trial value, t.
 *  @param  brackt  The reference to the predicate if the trial value is
 *                  bracketed.
 *  @retval int     Status value. Zero indicates a normal termination.
 *
 *  @see
 *      Jorge J. More and David J. Thuente. Line search algorithm with
 *      guaranteed sufficient decrease. ACM Transactions on Mathematical
 *      Software (TOMS), Vol 20, No 3, pp. 286-307, 1994.
 */
inline int update_trial_interval(double &x,
                                 double &fx,
                                 double &dx,
                                 double &y,
                                 double &fy,
                                 double &dy,
                                 double &t,
                                 double &ft,
                                 double &dt,
                                 const double tmin,
                                 const double tmax,
                                 int &brackt)
{
  int bound;
  /* Nonzero iff f'(t) and f'(x) have opposite signs. */
  int dsign = F_SIGNDIFF_LBFGS(dt, dx);
  double mc;            /* minimizer of an interpolated cubic. */
  double mq;            /* minimizer of an interpolated quadratic. */
  double newt;          /* new trial value. */
  USES_MINIMIZER_LBFGS; /* for CUBIC_MINIMIZER and QUAD_MINIMIZER. */

  /* Check the input parameters for errors. */
  if (brackt) {
    if (t <= MIN_LBFGS(x, y) || MAX_LBFGS(x, y) <= t) {
      /* The trial value t is out of the interval. */
      return LBFGSERR_OUTOFINTERVAL;
    }
    if (0. <= dx * (t - x)) {
      /* The function must decrease from x. */
      return LBFGSERR_INCREASEGRADIENT;
    }
    if (tmax < tmin) {
      /* Incorrect tmin and tmax specified. */
      return LBFGSERR_INCORRECT_TMINMAX;
    }
  }

  /*
  Trial value selection.
  */
  if (fx < ft) {
    /*
    Case 1: a higher function value.
    The minimum is brackt. If the cubic minimizer is closer
    to x than the quadratic one, the cubic one is taken, else
    the average of the minimizers is taken.
    */
    brackt = 1;
    bound = 1;
    CUBIC_MINIMIZER_LBFGS(mc, x, fx, dx, t, ft, dt);
    QUAD_MINIMIZER_LBFGS(mq, x, fx, dx, t, ft);
    if (fabs(mc - x) < fabs(mq - x)) {
      newt = mc;
    }
    else {
      newt = mc + 0.5 * (mq - mc);
    }
  }
  else if (dsign) {
    /*
    Case 2: a lower function value and derivatives of
    opposite sign. The minimum is brackt. If the cubic
    minimizer is closer to x than the quadratic (secant) one,
    the cubic one is taken, else the quadratic one is taken.
    */
    brackt = 1;
    bound = 0;
    CUBIC_MINIMIZER_LBFGS(mc, x, fx, dx, t, ft, dt);
    QUAD_MINIMIZER2_LBFGS(mq, x, dx, t, dt);
    if (fabs(mc - t) > fabs(mq - t)) {
      newt = mc;
    }
    else {
      newt = mq;
    }
  }
  else if (fabs(dt) < fabs(dx)) {
    /*
    Case 3: a lower function value, derivatives of the
    same sign, and the magnitude of the derivative decreases.
    The cubic minimizer is only used if the cubic tends to
    infinity in the direction of the minimizer or if the minimum
    of the cubic is beyond t. Otherwise the cubic minimizer is
    defined to be either tmin or tmax. The quadratic (secant)
    minimizer is also computed and if the minimum is brackt
    then the minimizer closest to x is taken, else the one
    farthest away is taken.
     */
    bound = 1;
    CUBIC_MINIMIZER2_LBFGS(mc, x, fx, dx, t, ft, dt, tmin, tmax);
    QUAD_MINIMIZER2_LBFGS(mq, x, dx, t, dt);
    if (brackt) {
      if (fabs(t - mc) < fabs(t - mq)) {
        newt = mc;
      }
      else {
        newt = mq;
      }
    }
    else {
      if (fabs(t - mc) > fabs(t - mq)) {
        newt = mc;
      }
      else {
        newt = mq;
      }
    }
  }
  else {
    /*
    Case 4: a lower function value, derivatives of the
    same sign, and the magnitude of the derivative does
    not decrease. If the minimum is not brackt, the step
    is either tmin or tmax, else the cubic minimizer is taken.
    */
    bound = 0;
    if (brackt) {
      CUBIC_MINIMIZER_LBFGS(newt, t, ft, dt, y, fy, dy);
    }
    else if (x < t) {
      newt = tmax;
    }
    else {
      newt = tmin;
    }
  }

  /*
  Update the interval of uncertainty. This update does not
  depend on the new step or the case analysis above.

  - Case a: if f(x) < f(t),
      x <- x, y <- t.
  - Case b: if f(t) <= f(x) && f'(t)*f'(x) > 0,
      x <- t, y <- y.
  - Case c: if f(t) <= f(x) && f'(t)*f'(x) < 0,
      x <- t, y <- x.
   */
  if (fx < ft) {
    /* Case a */
    y = t;
    fy = ft;
    dy = dt;
  }
  else {
    /* Case c */
    if (dsign) {
      y = x;
      fy = fx;
      dy = dx;
    }
    /* Cases b and c */
    x = t;
    fx = ft;
    dx = dt;
  }

  /* Clip the new trial value in [tmin, tmax]. */
  if (tmax < newt)
    newt = tmax;
  if (newt < tmin)
    newt = tmin;

  /*
  Redefine the new trial value if it is close to the upper bound
  of the interval.
  */
  if (brackt && bound) {
    mq = x + 0.66 * (y - x);
    if (x < y) {
      if (mq < newt)
        newt = mq;
    }
    else {
      if (newt < mq)
        newt = mq;
    }
  }

  /* Return the new trial value. */
  t = newt;
  return 0;
}

/**
 * Line search method for smooth functions (More-Thuente).
 *  Finds a step satisfying the sufficient decrease condition and the
 *  strong curvature condition by maintaining an interval of uncertainty
 *  updated via update_trial_interval().
 *
 * @param x          The decision variables; on entry the point before the
 *                   line search, on exit x_last + stp * direction.
 * @param f          The cost value at x; updated in place.
 * @param grad       The gradient at x; updated in place.
 * @param stp        In: initial trial step (> 0). Out: accepted step.
 * @param direction  The search direction (must be a descent direction).
 * @param x_last     The variables before the current line search.
 * @param grad_last  The gradient before the current line search.
 *                   NOTE(review): unused in this routine (the initial
 *                   directional derivative is taken from `grad`, which holds
 *                   the same gradient on entry); kept for signature parity
 *                   with line_search_lewisoverton.
 * @param cd         The callback data.
 * @param param      The parameters for L-BFGS optimization.
 * @retval int       Number of evaluations on success, negative error code
 *                   otherwise.
 *
 * @see
 *     Jorge J. More and David J. Thuente. Line search algorithm with
 *     guaranteed sufficient decrease. ACM TOMS, Vol 20, No 3, 1994.
 */
inline int line_search_morethuente(Eigen::VectorXd &x,
                                   double &f,
                                   Eigen::VectorXd &grad,
                                   double &stp,
                                   const Eigen::VectorXd &direction,
                                   const Eigen::VectorXd &x_last,
                                   const Eigen::VectorXd &grad_last,
                                   const callback_data_t &cd,
                                   const lbfgs_parameter_t &param)
{
  int count = 0;
  int brackt, stage1, uinfo = 0;
  double dg_now;
  double stp_best, f_best, dg_best;
  double stp_end, f_end, dg_end;
  double fxm, dgxm, fym, dgym, fm, dgm;
  double f_init, f_test1, dg_init, dg_test;
  double width, prev_width;
  double st_min, st_max;
  const double stp_min = param.min_step;
  const double stp_max = param.max_step;

  /* Check the input parameters for errors. */
  if (stp <= 0.) {
    return LBFGSERR_INVALIDPARAMETERS;
  }

  /* Compute the initial gradient in the search direction. */
  dg_init = grad.dot(direction);

  /* Make sure that direction points to a descent direction. */
  if (0 < dg_init) {
    return LBFGSERR_INCREASEGRADIENT;
  }

  /* Initialize local variables. */
  brackt = 0;
  stage1 = 1;
  f_init = f;
  dg_test = param.f_dec_coeff * dg_init; /* Armijo slope threshold (negative). */
  width = stp_max - stp_min;
  prev_width = 2.0 * width;

  /*
  The variables stp_best, f_best, dg_best contain the values of the step,
  function, and directional derivative at the best step.
  The variables stp_end, f_end, dg_end contain the value of the step,
  function, and derivative at the other endpoint of
  the interval of uncertainty.
  The variables stp, f, dg_now contain the values of the step,
  function, and derivative at the current step.
  */
  stp_best = stp_end = 0.;
  f_best = f_end = f_init;
  dg_best = dg_end = dg_init;

  for (;;) {
    /* Report the progress. */
    if (cd.proc_progress) {
      if (cd.proc_progress(cd.instance, x, grad, f, stp, 0, 0)) {
        return LBFGSERR_CANCELED;
      }
    }

    /*
    Set the minimum and maximum steps to correspond to the
    present interval of uncertainty.
    */
    if (brackt) {
      st_min = MIN_LBFGS(stp_best, stp_end);
      st_max = MAX_LBFGS(stp_best, stp_end);
    }
    else {
      st_min = stp_best;
      st_max = stp + 4.0 * (stp - stp_best);
    }

    /* Clip the step in the range of [stp_min, stp_max]. */
    if (stp < stp_min)
      stp = stp_min;
    if (stp_max < stp)
      stp = stp_max;

    /*
    If an unusual termination is to occur then let
    stp be the lowest point obtained so far.
    */
    if ((brackt && ((stp <= st_min || st_max <= stp) ||
                    param.max_linesearch <= count + 1 || uinfo != 0)) ||
        (brackt && (st_max - st_min <= param.xtol * st_max))) {
      stp = stp_best;
    }

    /*
    Compute the current value of x:
        x <- x + (stp) * direction.
    */
    x = x_last + stp * direction;

    /* Evaluate the function and gradient values. */
    f = cd.proc_evaluate(cd.instance, x, grad);
    dg_now = grad.dot(direction);

    f_test1 = f_init + stp * dg_test; /* Armijo (sufficient decrease) bound. */
    ++count;

    /* Test for errors and convergence. */
    if (brackt && ((stp <= st_min || st_max <= stp) || uinfo != 0)) {
      /* Rounding errors prevent further progress. */
      return LBFGSERR_ROUNDING_ERROR;
    }
    if (stp == stp_max && f <= f_test1 && dg_now <= dg_test) {
      /* The step is the maximum value. */
      return LBFGSERR_MAXIMUMSTEP;
    }
    if (stp == stp_min && (f_test1 < f || dg_test <= dg_now)) {
      /* The step is the minimum value. */
      return LBFGSERR_MINIMUMSTEP;
    }
    if (brackt && (st_max - st_min) <= param.xtol * st_max) {
      /* Relative width of the interval of uncertainty is at most xtol. */
      return LBFGSERR_WIDTHTOOSMALL;
    }
    if (param.max_linesearch <= count) {
      /* Maximum number of iteration. */
      return LBFGSERR_MAXIMUMLINESEARCH;
    }
    if (f <= f_test1 && fabs(dg_now) <= param.s_curv_coeff * (-dg_init)) {
      /* The sufficient decrease condition and the strong curvature condition hold. */
      return count;
    }

    /*
    In the first stage we seek a step for which the modified
    function has a nonpositive value and nonnegative derivative.
    */
    if (stage1 && f <= f_test1 &&
        MIN_LBFGS(param.f_dec_coeff, param.s_curv_coeff) * dg_init <= dg_now) {
      stage1 = 0;
    }

    /*
    A modified function is used to predict the step only if
    we have not obtained a step for which the modified
    function has a nonpositive function value and nonnegative
    derivative, and if a lower function value has been
    obtained but the decrease is not sufficient.
    */
    if (stage1 && f_test1 < f && f <= f_best) {
      /* Define the modified function and derivative values. */
      fm = f - stp * dg_test;
      fxm = f_best - stp_best * dg_test;
      fym = f_end - stp_end * dg_test;
      dgm = dg_now - dg_test;
      dgxm = dg_best - dg_test;
      dgym = dg_end - dg_test;

      /*
      Call update_trial_interval() to update the interval of
      uncertainty and to compute the new step.
      */
      uinfo = update_trial_interval(
          stp_best, fxm, dgxm,
          stp_end, fym, dgym,
          stp, fm, dgm,
          st_min, st_max, brackt);

      /* Reset the function and gradient values for f. */
      f_best = fxm + stp_best * dg_test;
      f_end = fym + stp_end * dg_test;
      dg_best = dgxm + dg_test;
      dg_end = dgym + dg_test;
    }
    else {
      /*
      Call update_trial_interval() to update the interval of
      uncertainty and to compute the new step.
      */
      uinfo = update_trial_interval(
          stp_best, f_best, dg_best,
          stp_end, f_end, dg_end,
          stp, f, dg_now,
          st_min, st_max, brackt);
    }

    /*
    Force a sufficient decrease in the interval of uncertainty.
    */
    if (brackt) {
      if (0.66 * prev_width <= fabs(stp_end - stp_best)) {
        stp = stp_best + 0.5 * (stp_end - stp_best);
      }
      prev_width = width;
      width = fabs(stp_end - stp_best);
    }
  }

  /* Unreachable: the loop above only exits via return statements. */
  return LBFGSERR_LOGICERROR;
}

/**
 * Line search method for smooth or nonsmooth functions.
 *  This function performs line search to find a point that satisfies
 *  both the Armijo condition and the weak Wolfe condition. It is
 *  as robust as the backtracking line search but further applies
 *  to continuous and piecewise smooth functions where the strong
 *  Wolfe condition usually does not hold.
 *
 *  The minimum and maximum admissible steps are taken from
 *  param.min_step and param.max_step.
 *
 *  @see
 *      Adrian S. Lewis and Michael L. Overton. Nonsmooth optimization
 *      via quasi-Newton methods. Mathematical Programming, Vol 141,
 *      No 1, pp. 135-163, 2013.
 *
 * @param x          The current values of variables; on exit,
 *                   x_last + stp * direction.
 * @param f          The current value of the cost function; updated in place.
 * @param grad       The current gradient values of variables; updated in place.
 * @param stp        In: initial trial step (> 0). Out: accepted step.
 * @param direction  The current step direction vector.
 * @param x_last     The values of variables before current line search.
 * @param grad_last  The gradient values of variables before current line search.
 * @param cd         The callback data.
 * @param param      The parameters for L-BFGS optimization.
 * @retval int       Number of evaluations on success, negative error code
 *                   otherwise.
 */
inline int line_search_lewisoverton(Eigen::VectorXd &x,
                                    double &f,
                                    Eigen::VectorXd &grad,
                                    double &stp,
                                    const Eigen::VectorXd &direction,
                                    const Eigen::VectorXd &x_last,
                                    const Eigen::VectorXd &grad_last,
                                    const callback_data_t &cd,
                                    const lbfgs_parameter_t &param)
{
  int count = 0;
  const double stp_min = param.min_step;
  const double stp_max = param.max_step;
  bool brackt = false, touched = false;
  double finit, dginit, dgtest, dstest;
  /* [mu, nu] brackets the acceptable step; nu starts at the maximum step. */
  double mu = 0.0, nu = stp_max;

  /* Check the input parameters for errors. */
  if (!(stp > 0.0)) {
    return LBFGSERR_INVALIDPARAMETERS;
  }

  /* Compute the initial gradient in the search direction. */
  dginit = grad_last.dot(direction);

  /* Make sure that direction points to a descent direction. */
  if (0.0 < dginit) {
    return LBFGSERR_INCREASEGRADIENT;
  }

  /* The initial value of the cost function. */
  finit = f;
  dgtest = param.f_dec_coeff * dginit; /* Armijo slope threshold. */
  dstest = param.s_curv_coeff * dginit; /* Weak Wolfe curvature threshold. */

  while (true) {
    /* Report the progress. */
    if (cd.proc_progress) {
      if (cd.proc_progress(cd.instance, x, grad, f, stp, 0, 0)) {
        return LBFGSERR_CANCELED;
      }
    }

    x = x_last + stp * direction;

    /* Evaluate the function and gradient values. */
    f = cd.proc_evaluate(cd.instance, x, grad);
    ++count;

    /* Test for errors. */
    if (std::isinf(f) || std::isnan(f)) {
      return LBFGSERR_INVALID_FUNCVAL;
    }
    /* Check the Armijo condition. */
    if (f > finit + stp * dgtest) {
      /* Step too long: shrink the bracket from above. */
      nu = stp;
      brackt = true;
    }
    else {
      /* Check the weak Wolfe condition. */
      if (grad.dot(direction) < dstest) {
        /* Step too short: raise the lower bound. */
        mu = stp;
      }
      else {
        return count;
      }
    }
    if (param.max_linesearch <= count) {
      /* Maximum number of iteration. */
      return LBFGSERR_MAXIMUMLINESEARCH;
    }
    if (brackt && (nu - mu) < param.xtol * nu) {
      /* Relative interval width has shrunk below xtol. */
      return LBFGSERR_WIDTHTOOSMALL;
    }

    /* Bisect once bracketed; otherwise expand the step geometrically. */
    if (brackt) {
      stp = 0.5 * (mu + nu);
    }
    else {
      stp *= 2.0;
    }

    if (stp < stp_min) {
      /* The step is the minimum value. */
      return LBFGSERR_MINIMUMSTEP;
    }
    if (stp > stp_max) {
      if (touched) {
        /* The step is the maximum value. */
        return LBFGSERR_MAXIMUMSTEP;
      }
      else {
        /* The maximum value should be tried once. */
        touched = true;
        stp = stp_max;
      }
    }
  }
}

/**
 * Start a L-BFGS optimization.
 * Assumptions: 1. f(x) is either C2 or C0 but piecewise C2;
 *              2. f(x) is lower bounded;
 *              3. f(x) has bounded level sets;
 *              4. g(x) is either the gradient or subgradient;
 *              5. The gradient exists at the initial guess x0.
 * A user must implement a function compatible with ::lbfgs_evaluate_t (evaluation
 * callback) and pass the pointer to the callback function to lbfgs_optimize()
 * arguments. Similarly, a user can implement a function compatible with
 * ::lbfgs_stepbound_t to provide an external upper bound for stepsize, and
 * ::lbfgs_progress_t (progress callback) to obtain the current progress
 * (e.g., variables, function, and gradient, etc) and to cancel the iteration
 * process if necessary. Implementation of the stepbound and the progress callback
 * is optional: a user can pass nullptr if progress notification is not necessary.
 *
 *
 *  @param  x               The vector of decision variables.
 *                          THE INITIAL GUESS x0 SHOULD BE SET BEFORE THE CALL!
 *                          A client program can receive decision variables
 *                          through this vector, at which the cost and its
 *                          gradient are queried during minimization.
 *  @param  f               The ref to the variable that receives the final
 *                          value of the cost function for the variables.
 *  @param  proc_evaluate   The callback function to provide function f(x) and
 *                          gradient g(x) evaluations given the current values of
 *                          variables x. A client program must implement a
 *                          callback function compatible with lbfgs_evaluate_t
 *                          and pass the pointer to the callback function.
 *  @param  proc_stepbound  The callback function to provide values of the
 *                          upperbound of the stepsize to search in, provided
 *                          with the beginning values of variables before the
 *                          line search, and the current step vector (can be
 *                          negative gradient). A client program can implement
 *                          this function for more efficient linesearch. If it is
 *                          not used, just set it nullptr.
 *  @param  proc_progress   The callback function to receive the progress
 *                          (the number of iterations, the current value of
 *                          the cost function) of the minimization
 *                          process. This argument can be set to nullptr if
 *                          a progress report is unnecessary.
 *  @param  instance        A user data pointer for client programs. The callback
 *                          functions will receive the value of this argument.
 *  @param  param           The parameters for L-BFGS optimization.
 *  @retval int             The status code. This function returns a nonnegative
 *                          integer if the minimization process terminates without
 *                          an error. A negative integer indicates an error.
 */
inline int lbfgs_optimize(Eigen::VectorXd &x,
                          double &f,
                          lbfgs_evaluate_t proc_evaluate,
                          lbfgs_stepbound_t proc_stepbound,
                          lbfgs_progress_t proc_progress,
                          void *instance,
                          const lbfgs_parameter_t &param)
{
  int ret, i, j, k, ls, end, bound;
  double step, step_min, step_max, fx, ys, yy;
  double gnorm_inf, xnorm_inf, beta, rate, cau;

  const int n = x.size();
  const int m = param.mem_size;

  /* Check the input parameters for errors. */
  if (n <= 0) {
    return LBFGSERR_INVALID_N;
  }
  if (m <= 0) {
    return LBFGSERR_INVALID_MEMSIZE;
  }
  if (param.g_epsilon < 0.0) {
    return LBFGSERR_INVALID_GEPSILON;
  }
  if (param.past < 0) {
    return LBFGSERR_INVALID_TESTPERIOD;
  }
  if (param.delta < 0.0) {
    return LBFGSERR_INVALID_DELTA;
  }
  if (param.min_step < 0.0) {
    return LBFGSERR_INVALID_MINSTEP;
  }
  if (param.max_step < param.min_step) {
    return LBFGSERR_INVALID_MAXSTEP;
  }
  if (!(param.f_dec_coeff > 0.0 &&
        param.f_dec_coeff < 1.0)) {
    return LBFGSERR_INVALID_FDECCOEFF;
  }
  if (!(param.s_curv_coeff < 1.0 &&
        param.s_curv_coeff > param.f_dec_coeff)) {
    return LBFGSERR_INVALID_SCURVCOEFF;
  }
  if (!(param.xtol > 0.0)) {
    return LBFGSERR_INVALID_MACHINEPREC;
  }
  if (param.max_linesearch <= 0) {
    return LBFGSERR_INVALID_MAXLINESEARCH;
  }

  /* Prepare intermediate variables. */
  Eigen::VectorXd xp(n);
  Eigen::VectorXd g(n);
  Eigen::VectorXd gp(n);
  Eigen::VectorXd d(n);
  /* Ring buffer of the past function values for the delta-based test;
     size 1 keeps pf(0) valid even when param.past == 0. */
  Eigen::VectorXd pf(std::max(1, param.past));

  /* Initialize the limited memory. */
  Eigen::VectorXd lm_alpha = Eigen::VectorXd::Zero(m);
  Eigen::MatrixXd lm_s = Eigen::MatrixXd::Zero(n, m);
  Eigen::MatrixXd lm_y = Eigen::MatrixXd::Zero(n, m);
  Eigen::VectorXd lm_ys = Eigen::VectorXd::Zero(m);

  /* Construct a callback data. */
  callback_data_t cd;
  cd.instance = instance;
  cd.proc_evaluate = proc_evaluate;
  cd.proc_stepbound = proc_stepbound;
  cd.proc_progress = proc_progress;

  /* Evaluate the function value and its gradient. */
  fx = cd.proc_evaluate(cd.instance, x, g);

  /* Store the initial value of the cost function. */
  pf(0) = fx;

  /*
  Compute the direction;
  we assume the initial hessian matrix H_0 as the identity matrix.
  */
  d = -g;

  /*
  Make sure that the initial variables are not a stationary point.
  */
  gnorm_inf = g.cwiseAbs().maxCoeff(); // TODO: consider whether an L1 norm would be preferable here.
  xnorm_inf = x.cwiseAbs().maxCoeff();

  if (gnorm_inf / std::max(1.0, xnorm_inf) <= param.g_epsilon) {
    /* The initial guess is already a stationary point. */
    ret = LBFGS_CONVERGENCE;
  }
  else {
    /*
    Compute the initial step:
    */
    step = 1.0 / d.norm();

    k = 1;
    end = 0;
    bound = 0;

    while (true) {
      /* Store the current position and gradient vectors. */
      xp = x;
      gp = g;

      /* If the step bound can be provided dynamically, then apply it. */
      step_min = param.min_step;
      step_max = param.max_step;
      if (cd.proc_stepbound) {
        step_max = cd.proc_stepbound(cd.instance, xp, d);
        step_max = step_max < param.max_step ? step_max : param.max_step;
      }
      step = step < step_max ? step : 0.5 * step_max;

      /**
       * Search for an optimal step.
       *  choose one of the following linesearch algorithms:
       */
      // ls = line_search_lewisoverton(x, fx, g, step, d, xp, gp, cd, param);
      ls = line_search_morethuente(x, fx, g, step, d, xp, gp, cd, param);

      if (ls < 0) {
        /* Revert to the previous point. */
        x = xp;
        g = gp;
        ret = ls;
        break;
      }

      /* Report the progress; the monitor callback may cancel the run.
         (This is the only place proc_progress is invoked with the
         More-Thuente line search selected above.) */
      if (cd.proc_progress) {
        if (cd.proc_progress(cd.instance, x, g, fx, step, k, ls)) {
          ret = LBFGSERR_CANCELED;
          break;
        }
      }

      /*
      Convergence test.
      The criterion is given by the following formula:
      ||g(x)||_inf / max(1, ||x||_inf) < g_epsilon
      */
      gnorm_inf = g.cwiseAbs().maxCoeff();
      xnorm_inf = x.cwiseAbs().maxCoeff();
      if (gnorm_inf / std::max(1.0, xnorm_inf) < param.g_epsilon) {
        /* Convergence. */
        ret = LBFGS_CONVERGENCE;
        break;
      }

      /*
      Test for stopping criterion.
      The criterion is given by the following formula:
      |f(past_x) - f(x)| / max(1, |f(x)|) < \delta.
      */
      if (0 < param.past) {
        /* We don't test the stopping criterion while k < past. */
        if (param.past <= k) {
          /* The stopping criterion. */
          rate = std::fabs(pf(k % param.past) - fx) / std::max(1.0, std::fabs(fx));

          if (rate < param.delta) {
            ret = LBFGS_STOP;
            break;
          }
        }

        /* Store the current value of the cost function. */
        pf(k % param.past) = fx;
      }

      if (param.max_iterations != 0 && param.max_iterations <= k) {
        /* Maximum number of iterations. */
        ret = LBFGSERR_MAXIMUMITERATION;
        break;
      }

      /* Count the iteration number. */
      ++k;

      /*
      Update vectors s and y:
      s_{k+1} = x_{k+1} - x_{k} = \step * d_{k}.
      y_{k+1} = g_{k+1} - g_{k}.
      */
      lm_s.col(end) = x - xp;
      lm_y.col(end) = g - gp;

      /*
      Compute scalars ys and yy:
      ys = y^t \cdot s = 1 / \rho.
      yy = y^t \cdot y.
      Notice that yy is used for scaling the hessian matrix H_0 (Cholesky factor).
      */
      ys = lm_y.col(end).dot(lm_s.col(end));
      yy = lm_y.col(end).squaredNorm();
      lm_ys(end) = ys;

      /* Compute the negative of gradients. */
      d = -g;

      /*
      Only cautious update is performed here as long as
      (y^t \cdot s) / ||s_{k+1}||^2 > \epsilon * ||g_{k}||^\alpha,
      where \epsilon is the cautious factor and a proposed value
      for \alpha is 1.
      This is not for enforcing the PD of the approximated Hessian
      since ys > 0 is already ensured by the weak Wolfe condition.
      This is to ensure the global convergence as described in:
      Dong-Hui Li and Masao Fukushima. On the global convergence of
      the BFGS method for nonconvex unconstrained optimization problems.
      SIAM Journal on Optimization, Vol 11, No 4, pp. 1054-1064, 2001.
      */
      cau = lm_s.col(end).squaredNorm() * gp.norm() * param.cautious_factor;

      if (ys > cau) {
        /*
        Recursive formula to compute dir = -(H \cdot g).
        This is described in page 779 of:
        Jorge Nocedal.
        Updating Quasi-Newton Matrices with Limited Storage.
        Mathematics of Computation, Vol. 35, No. 151,
        pp. 773--782, 1980.
        */
        ++bound;
        bound = m < bound ? m : bound;
        end = (end + 1) % m;

        j = end;
        for (i = 0; i < bound; ++i) {
          j = (j + m - 1) % m; /* if (--j == -1) j = m-1; */
          /* \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1}. */
          lm_alpha(j) = lm_s.col(j).dot(d) / lm_ys(j);
          /* q_{i} = q_{i+1} - \alpha_{i} y_{i}. */
          d += (-lm_alpha(j)) * lm_y.col(j);
        }

        d *= ys / yy;

        for (i = 0; i < bound; ++i) {
          /* \beta_{j} = \rho_{j} y^t_{j} \cdot \gamm_{i}. */
          beta = lm_y.col(j).dot(d) / lm_ys(j);
          /* \gamm_{i+1} = \gamm_{i} + (\alpha_{j} - \beta_{j}) s_{j}. */
          d += (lm_alpha(j) - beta) * lm_s.col(j);
          j = (j + 1) % m; /* if (++j == m) j = 0; */
        }
      }

      /* The search direction d is ready. We try step = 1 first. */
      step = 1.0;
    }
  }

  /* Return the final value of the cost function. */
  f = fx;

  return ret;
}

/**
 * Get string description of an lbfgs_optimize() return code.
 *
 *  @param err          A value returned by lbfgs_optimize().
 */
/* Translate an lbfgs_optimize() / line-search status code into a
   human-readable message.
   @param err   A value returned by lbfgs_optimize().
   @retval      A static, null-terminated description string; never nullptr. */
inline const char *lbfgs_strerror(const int err)
{
  switch (err) {
  case LBFGS_CONVERGENCE:
    return "Success: reached convergence (g_epsilon).";

  case LBFGS_STOP:
    return "Success: met stopping criteria (past f decrease less than delta).";

  case LBFGSERR_CANCELED:
    return "The iteration has been canceled by the monitor callback.";

  case LBFGSERR_UNKNOWNERROR:
    return "Unknown error.";

  case LBFGSERR_INVALID_N:
    return "Invalid number of variables specified.";

  case LBFGSERR_INVALID_MEMSIZE:
    return "Invalid parameter lbfgs_parameter_t::mem_size specified.";

  case LBFGSERR_INVALID_GEPSILON:
    return "Invalid parameter lbfgs_parameter_t::g_epsilon specified.";

  case LBFGSERR_INVALID_TESTPERIOD:
    return "Invalid parameter lbfgs_parameter_t::past specified.";

  case LBFGSERR_INVALID_DELTA:
    return "Invalid parameter lbfgs_parameter_t::delta specified.";

  case LBFGSERR_INVALID_MINSTEP:
    return "Invalid parameter lbfgs_parameter_t::min_step specified.";

  case LBFGSERR_INVALID_MAXSTEP:
    return "Invalid parameter lbfgs_parameter_t::max_step specified.";

  case LBFGSERR_INVALID_FDECCOEFF:
    return "Invalid parameter lbfgs_parameter_t::f_dec_coeff specified.";

  case LBFGSERR_INVALID_SCURVCOEFF:
    return "Invalid parameter lbfgs_parameter_t::s_curv_coeff specified.";

  case LBFGSERR_INVALID_MACHINEPREC:
    return "Invalid parameter lbfgs_parameter_t::xtol specified.";

  case LBFGSERR_INVALID_MAXLINESEARCH:
    return "Invalid parameter lbfgs_parameter_t::max_linesearch specified.";

  case LBFGSERR_INVALID_FUNCVAL:
    return "The function value became NaN or Inf.";

  case LBFGSERR_MINIMUMSTEP:
    return "The line-search step became smaller than lbfgs_parameter_t::min_step.";

  case LBFGSERR_MAXIMUMSTEP:
    return "The line-search step became larger than lbfgs_parameter_t::max_step.";

  case LBFGSERR_MAXIMUMLINESEARCH:
    return "Line search reaches the maximum try number, assumptions not satisfied or "
           "precision not achievable.";

  case LBFGSERR_MAXIMUMITERATION:
    return "The algorithm routine reaches the maximum number of iterations.";

  case LBFGSERR_WIDTHTOOSMALL:
    /* Message fixed: the struct has no machine_prec field; the width
       tolerance used by the line searches is lbfgs_parameter_t::xtol. */
    return "Relative search interval width is at least lbfgs_parameter_t::xtol.";

  case LBFGSERR_INVALIDPARAMETERS:
    return "A logic error (negative line-search step) occurred.";

  case LBFGSERR_INCREASEGRADIENT:
    return "The current search direction increases the cost function value.";

  case LBFGSERR_OUTOFINTERVAL:
    return "The line-search step went out of the interval of uncertainty.";

  case LBFGSERR_INCORRECT_TMINMAX:
    return "A logic error occurred; alternatively, the interval of uncertainty "
           "became too small.";

  case LBFGSERR_ROUNDING_ERROR:
    return "A rounding error occurred; alternatively, no line-search step"
           " satisfies the sufficient decrease and curvature conditions.";

  default:
    return "(unknown)";
  }
}

} // namespace lbfgs

#endif
