#pragma once

#include <iostream>
#include "../core/i_constant.h"
#include "../core/i_alloc.h"
#include "../numeric/i_cholesky.h"

/*LM implementation similar to the algorithm published in [1] "Methods for Non-Linear Least Squares Problems", by K. Madsen et al., Tech. Univ. of Denmark*/

#define I_DEFAULT_LM_MAX_ITER   100
#define I_DEFAULT_LM_TAU        1e-3 /*as suggested by [1], if x0 is a good approximation to x, use 1e-6, otherwise use 1e-3 or even 1.0, to be safe we use 1e-3*/
#define I_DEFAULT_LM_EPSILON1   1e-12
#define I_DEFAULT_LM_EPSILON2   1e-12

namespace idl
{
  class LevenbergMarquardt
  {
  public:
    LevenbergMarquardt() :_m(0), _n(0), _J(NULL), _JtJ(NULL), _Jtf(NULL), _A(NULL), _ftmp(NULL), _xnew(NULL), _J_f(NULL), _JtJ_f(NULL), _Jtf_f(NULL), _A_f(NULL), _ftmp_f(NULL), _xnew_f(NULL), _nr_iter(0), _status(0), _max_iter(I_DEFAULT_LM_MAX_ITER), _verbose(true){};
    virtual ~LevenbergMarquardt(){ cleanup(); cleanup_f(); };
    int getNumIter() const { return _nr_iter; };
    void setMaxFuncEv(int max_iter) { _max_iter = max_iter; };
    int  getMaxFuncEv() const { return _max_iter; };
    void setVerbose(bool verbose) { _verbose = verbose; };
    bool getVerbose() const { return _verbose; };
    //obj_func is non-const because it may have internal states, which can change at each run
    template <typename F>
    bool optimize(F& obj_func, double *x, /*parameters to be optimized*/double *fvec/*length n is the number of residuals*/);
    template <typename F>
    bool optimize(F& obj_func,  float *x, /*parameters to be optimized*/ float *fvec/*length n is the number of residuals*/);
  protected:
    template <typename F>
    void runLM(F& obj_func,
               double *x,/*size m*/
               double *fvec/*size n*/);

    template <typename F>
    void runLM(F& obj_func,
               float *x,/*size m*/
               float *fvec/*size n*/);

    void cleanup()
    {
      i_free<double>(_J);
      i_free<double>(_JtJ);
      i_free<double>(_A);
      i_free<double>(_Jtf);
      i_free<double>(_ftmp);
      i_free<double>(_xnew);
    };

    void cleanup_f()
    {
      i_free<float>(_J_f);
      i_free<float>(_JtJ_f);
      i_free<float>(_A_f);
      i_free<float>(_Jtf_f);
      i_free<float>(_ftmp_f);
      i_free<float>(_xnew_f);
    };

    bool allocMemory(const int m, const int n)
    {
      if (m == _m && n == _n)
      {
        if (_J == NULL) { _J = i_alloc<double>(n*m); } /*J*/
        if (_JtJ == NULL) { _JtJ = i_alloc<double>(m*m); } /*JtJ*/
        if (_A == NULL) { _A = i_alloc<double>(m*m); } /*A*/
        if (_Jtf == NULL) { _Jtf = i_alloc<double>(m); } /*Jtf*/
        if (_ftmp == NULL) { _ftmp = i_alloc<double>(n); } /*ftmp*/
        if (_xnew == NULL) { _xnew = i_alloc<double>(m); } /*xnew*/
      }
      else
      {
        cleanup();
        _J = i_alloc<double>(n*m); /*J*/
        _JtJ = i_alloc<double>(m*m); /*JtJ*/
        _A = i_alloc<double>(m*m); /*A*/
        _Jtf = i_alloc<double>(m);   /*Jtf*/
        _ftmp = i_alloc<double>(n);  /*ftmp*/
        _xnew = i_alloc<double>(m);  /*xnew*/
      }
      if (_J == NULL || _JtJ == NULL || _A == NULL || _Jtf == NULL || _ftmp == NULL || _xnew == NULL)
      {
        return false;
      }
      return true;
    };

    bool allocMemory_f(const int m, const int n)
    {
      if (m == _m && n == _n)
      {
        if (_J_f == NULL) { _J_f = i_alloc<float>(n*m); } /*J*/
        if (_JtJ_f == NULL) { _JtJ_f = i_alloc<float>(m*m); } /*JtJ*/
        if (_A_f == NULL) { _A_f = i_alloc<float>(m*m); } /*A*/
        if (_Jtf_f == NULL) { _Jtf_f = i_alloc<float>(m); } /*Jtf*/
        if (_ftmp_f == NULL) { _ftmp_f = i_alloc<float>(n); } /*ftmp*/
        if (_xnew_f == NULL) { _xnew_f = i_alloc<float>(m); } /*xnew*/
      }
      else
      {
        cleanup_f();
        _J_f = i_alloc<float>(n*m); /*J*/
        _JtJ_f = i_alloc<float>(m*m); /*JtJ*/
        _A_f = i_alloc<float>(m*m); /*A*/
        _Jtf_f = i_alloc<float>(m);   /*Jtf*/
        _ftmp_f = i_alloc<float>(n);  /*ftmp*/
        _xnew_f = i_alloc<float>(m);  /*xnew*/
      }
      if (_J_f == NULL || _JtJ_f == NULL || _A_f == NULL || _Jtf_f == NULL || _ftmp_f == NULL || _xnew_f == NULL)
      {
        return false;
      }
      return true;
    };

    void diagnose(std::ostream& os) const
    {
      switch (_status) {
      case 0:
        break; // error in user function
      case 1: // ftol
        os << "both actual and predicted relative reductions in the sum of squares are at most ftol.";
        break;
      case 2:
        os << "ftol is too small. no further reduction in the sum of squares is possible.";
        break;
      case 3:
        os << "xtol is too small. no further improvement in the approximate solution x is possible.";
        break;
      default:
        os << "unknown error";
        break;
      }
      os << std::endl;
      os << (int)_nr_iter << " iterations, " << std::endl;
    };
  protected:
    int _m;           /*number of unknowns*/
    int _n;           /*number of residuals*/
    int _max_iter;     /* maximum number of function evaluations*/
    int _nr_iter;     /* number of iterations*/
    int _status;      /* optimizer status*/
    bool _verbose;    /* if output intermediate results and status*/
    /*internal memories:*/
    double *_J, *_JtJ;  /*Jacobian matrix J and J^tJ*/
    double *_A;         /*left hand side of the augmented normal equation, A = _JtJ+lambda*I*/
    double *_Jtf;       /*right hand side of the augmented normal equation*/
    double *_ftmp;
    double *_xnew;      /*the new x: xnew = x + update vector*/
    float *_J_f, *_JtJ_f;  /*Jacobian matrix J and J^tJ*/
    float *_A_f;         /*left hand side of the augmented normal equation, A = _JtJ+lambda*I*/
    float *_Jtf_f;       /*right hand side of the augmented normal equation*/
    float *_ftmp_f;
    float *_xnew_f;      /*the new x: xnew = x + update vector*/
  };

/*Validate the problem size, (re)allocate the double-precision workspace and
  run the LM iterations on x (size m). fvec (size n) receives the final
  residuals. Returns false on invalid sizes (_status=1) or allocation failure
  (_status=2), true otherwise.*/
template<typename F>
bool LevenbergMarquardt::optimize(F& obj_func, double *x,/*parameters to be optimized*/ double *fvec/*length n is the number of residuals*/)
{
  const int m = obj_func.getNumUnknowns();
  const int n = obj_func.getNumberResiduals();
  if (!m || !n)
  {
    _status = 1;
    if (_verbose)
    {
      std::cerr << "The number of residual is zero or the parameter has size zero." << std::endl;
      std::cerr << "Levenberg-Marquardt does not work under such condition." << std::endl;
    }
    return false;
  }
  if (m > n) /*LM requires m<=n*/
  {
    _status = 1;
    if (_verbose)
    {
      std::cerr << "The number of residual is smaller than the number of unknowns." << std::endl;
      std::cerr << "The optimization problem is under-constrained." << std::endl;
      std::cerr << "Levenberg-Marquardt does not work under such condition." << std::endl;
    }
    return false;
  }
  /*allocMemory compares (m, n) against the previous (_m, _n), so it must run
    before the new sizes are stored below*/
  if (!allocMemory(m, n))
  {
    _status = 2;
    if (_verbose)
    {
      std::cerr << "Not enough system memory for LM optimization." << std::endl;
    }
    return false;
  }

  /*assign new m, n to _m and _n*/
  _m = m;
  _n = n;

  /*Levenberg-Marquardt routine*/
  runLM(obj_func, x, fvec);

  if (_verbose)
  {
    diagnose(std::cout);
  }
  return true;
}

/*Single-precision counterpart of optimize(double): validate sizes, (re)allocate
  the float workspace and run the LM iterations on x (size m). fvec (size n)
  receives the final residuals. Returns false on invalid sizes (_status=1) or
  allocation failure (_status=2), true otherwise.*/
template<typename F>
bool LevenbergMarquardt::optimize(F& obj_func, float *x,/*parameters to be optimized*/ float *fvec/*length n is the number of residuals*/)
{
  const int m = obj_func.getNumUnknowns();
  const int n = obj_func.getNumberResiduals();
  if (!m || !n)
  {
    _status = 1;
    if (_verbose)
    {
      std::cerr << "The number of residual is zero or the parameter has size zero." << std::endl;
      std::cerr << "Levenberg-Marquardt does not work under such condition." << std::endl;
    }
    return false;
  }
  if (m > n) /*LM requires m<=n*/
  {
    _status = 1;
    if (_verbose)
    {
      std::cerr << "The number of residual is smaller than the number of unknowns." << std::endl;
      std::cerr << "The optimization problem is under-constrained." << std::endl;
      std::cerr << "Levenberg-Marquardt does not work under such condition." << std::endl;
    }
    return false;
  }
  /*allocMemory_f compares (m, n) against the previous (_m, _n), so it must run
    before the new sizes are stored below*/
  if (!allocMemory_f(m, n))
  {
    _status = 2;
    if (_verbose)
    {
      std::cerr << "Not enough system memory for LM optimization." << std::endl;
    }
    return false;
  }

  /*assign new m, n to _m and _n*/
  _m = m;
  _n = n;

  /*Levenberg-Marquardt routine*/
  runLM(obj_func, x, fvec);

  if (_verbose)
  {
    diagnose(std::cout);
  }
  return true;
}

/*Damped Gauss-Newton iteration as in [1]: solve (JtJ + lambda*I) h = -Jtf,
  accept the step if the gain ratio rho is positive (then shrink lambda),
  otherwise grow lambda and retry from the same x. Terminates when the gradient
  infinity-norm or the relative step size falls below the epsilons, or after
  _max_iter iterations.*/
template<typename F>
void LevenbergMarquardt::runLM(F& obj_func, double *x,/*parameters to be optimized*/ double *fvec/*length n is the number of residuals*/)
{
  /*initialize the internal parameters used by the Levenberg-Marquardt routine*/
  _status = 0;
  _nr_iter = 0;
  const int kSizeofAugmentedMatrix = _m*_m;
  const double kConstOneOverThree = 1.0 / 3.0;
  const double kTau = (double)I_DEFAULT_LM_TAU;
  const double kEpsilon = (double)I_DEFAULT_LM_EPSILON2;
  double nu = 2.0;
  /*evaluate Jacobian, residuals and normal-equation terms at the start point*/
  obj_func.computeJacobian(x, _J);
  i_mult_AtA<double>(_J, _JtJ, _n, _m);
  obj_func.computeCost(x, fvec);
  i_mult_Atx<double>(_J, fvec, _Jtf, _n, _m);
  /*initial damping scaled by the largest diagonal entry of JtJ, as suggested by [1]*/
  double lambda = kTau * i_max_diagonal_element(_JtJ, _m);
  double rho, dx_sqrsum, denominator, fsqr, fpsqr;
  /*stop immediately if the gradient is already (numerically) zero*/
  bool terminate = i_infinity_norm(_Jtf, _m, 1) < I_DEFAULT_LM_EPSILON1;

  if (_verbose)
  {
    std::cout << "Initial error (squared): " << i_squaresum(fvec, _n) << std::endl;
  }
  while (!terminate && _nr_iter < _max_iter)
  {
    ++_nr_iter;
    /*solve the augmented normal equation, then negate the solution to get the step h*/
    i_copy(_JtJ, _A, kSizeofAugmentedMatrix);
    i_augment_diagonal(_A, _m, lambda);
    i_solve_cholesky(_A, _Jtf, _xnew, _m);
    i_neg(_xnew, _m);

    dx_sqrsum = i_squaresum(_xnew, _m);
    if (i_sqrt(dx_sqrsum) < kEpsilon*i_l2_norm(x, _m))
    {
      terminate = true; /*step negligible relative to ||x||*/
    }
    else
    {
      /*gain-ratio denominator h^T(lambda*h - Jtf): the predicted reduction*/
      denominator = lambda*dx_sqrsum - i_dot(_xnew, _Jtf, _m);
      i_add(x, _xnew, _m); /*x_new = x + _xnew;*/
      obj_func.computeCost(_xnew, _ftmp);
      fsqr  = i_squaresum(fvec,  _n);
      fpsqr = i_squaresum(_ftmp, _n);
      rho = i_div((fsqr - fpsqr), denominator);
      if (rho > 0.0) /*step acceptable*/
      {
        i_copy(_xnew, x, _m);
        obj_func.computeJacobian(x, _J);
        i_mult_AtA<double>(_J, _JtJ,  _n,   _m);
        i_copy(_ftmp, fvec, _n); /*note that _ftmp already stored e = f(x_new) - observations, no need to recompute*/
        i_mult_Atx<double>(_J, fvec, _Jtf, _n, _m);
        terminate = i_infinity_norm(_Jtf, _m, 1) < I_DEFAULT_LM_EPSILON1;
        lambda = lambda * i_max(kConstOneOverThree, 1.0 - i_pow(2.0*rho - 1.0, 3));
        nu = 2.0;
      }
      else
      {
        /*rejected: increase damping and retry from the same x*/
        lambda = nu*lambda;
        nu = nu*2.0;
      }
      if (_verbose)
      {
        std::cout << "Iteration " << _nr_iter << " error(squared) : " << i_squaresum(fvec, _n) << std::endl;
      }
    }
  }
  if (_verbose)
  {
    std::cout << "Final error(squared) : " << i_squaresum(fvec, _n) << std::endl;
  }
}

/*Single-precision counterpart of runLM(double): damped Gauss-Newton iteration
  as in [1], using the float workspace. See the double version for the
  algorithmic details.*/
template<typename F>
void LevenbergMarquardt::runLM(F& obj_func, float *x,/*parameters to be optimized*/ float *fvec/*length n is the number of residuals*/)
{
  /*initialize the internal parameters used by the Levenberg-Marquardt routine*/
  _status = 0;
  _nr_iter = 0;
  const int kSizeofAugmentedMatrix = _m*_m;
  const float kConstOneOverThree = 1.0f / 3.0f;
  const float kTau = (float)I_DEFAULT_LM_TAU;
  const float kEpsilon = (float)I_DEFAULT_LM_EPSILON2;
  float nu = 2.0f;
  /*evaluate Jacobian, residuals and normal-equation terms at the start point*/
  obj_func.computeJacobian(x, _J_f);
  i_mult_AtA<float>(_J_f, _JtJ_f, _n, _m);
  obj_func.computeCost(x, fvec);
  i_mult_Atx<float>(_J_f, fvec, _Jtf_f, _n, _m);
  /*initial damping scaled by the largest diagonal entry of JtJ, as suggested by [1]*/
  float lambda = kTau * i_max_diagonal_element(_JtJ_f, _m);
  float rho;
  float dx_sqrsum, denominator, fsqr, fpsqr;
  /*stop immediately if the gradient is already (numerically) zero*/
  bool terminate = i_infinity_norm(_Jtf_f, _m, 1) < (float)I_DEFAULT_LM_EPSILON1;

  if (_verbose)
  {
    std::cout << "Initial error (squared): " << i_squaresum(fvec, _n) << std::endl;
  }
  while (!terminate && _nr_iter < _max_iter)
  {
    ++_nr_iter;
    /*solve the augmented normal equation, then negate the solution to get the step h*/
    i_copy(_JtJ_f, _A_f, kSizeofAugmentedMatrix);
    i_augment_diagonal(_A_f, _m, lambda);
    i_solve_cholesky(_A_f, _Jtf_f, _xnew_f, _m);
    i_neg(_xnew_f, _m);

    dx_sqrsum = i_squaresum(_xnew_f, _m);
    if (i_sqrt(dx_sqrsum) < kEpsilon*i_l2_norm(x, _m))
    {
      terminate = true; /*step negligible relative to ||x||*/
    }
    else
    {
      /*gain-ratio denominator h^T(lambda*h - Jtf): the predicted reduction*/
      denominator = lambda*dx_sqrsum - i_dot(_xnew_f, _Jtf_f, _m);
      i_add(x, _xnew_f, _m); /*x_new = x + _xnew;*/
      obj_func.computeCost(_xnew_f, _ftmp_f);
      fsqr = i_squaresum(fvec, _n);
      fpsqr = i_squaresum(_ftmp_f, _n);
      rho = i_div((fsqr - fpsqr), denominator);
      if (rho > 0.0f) /*step acceptable*/
      {
        i_copy(_xnew_f, x, _m);
        obj_func.computeJacobian(x, _J_f);
        i_mult_AtA<float>(_J_f, _JtJ_f, _n, _m);
        i_copy(_ftmp_f, fvec, _n); /*note that _ftmp already stored e = f(x_new) - observations, no need to recompute*/
        i_mult_Atx<float>(_J_f, fvec, _Jtf_f, _n, _m);
        terminate = i_infinity_norm(_Jtf_f, _m, 1) < (float)I_DEFAULT_LM_EPSILON1;
        lambda = lambda * i_max(kConstOneOverThree, 1.0f - i_pow(2.0f*rho - 1.0f, 3));
        nu = 2.0f;
      }
      else
      {
        /*rejected: increase damping and retry from the same x*/
        lambda = nu*lambda;
        nu = nu*2.0f;
      }
      if (_verbose)
      {
        std::cout << "Iteration " << _nr_iter << " error(squared) : " << i_squaresum(fvec, _n) << std::endl;
      }
    }
  }
  if (_verbose)
  {
    std::cout << "Final error(squared) : " << i_squaresum(fvec, _n) << std::endl;
  }
}

}/* namespace idl */
