#include "least_square.h"

#include <algorithm>
#include <cmath>
#include <iostream>

// Default constructor: members (fx_, J_) rely on their own default constructors.
LeastSquare::LeastSquare() = default;

// No owned resources to release.
LeastSquare::~LeastSquare() = default;

double LeastSquare::CostFunc(const Eigen::VectorXd &input,
                             const Eigen::VectorXd &output,
                             const Eigen::VectorXd &params,
                             int                    index)
{
  // Residual of the model y = A*sin(B*x) + C*cos(D*x) at sample `index`:
  //   r_i = A*sin(B*x_i) + C*cos(D*x_i) - y_i
  const double A  = params(0);
  const double B  = params(1);
  const double C  = params(2);
  const double D  = params(3);
  const double xi = input(index);
  const double yi = output(index);

  return A * std::sin(B * xi) + C * std::cos(D * xi) - yi;
}

void LeastSquare::ObjF(const Eigen::VectorXd &input, const Eigen::VectorXd &output, const Eigen::VectorXd &params)
{
  // Evaluate the full residual vector fx_ for the current parameter vector.
  const int sampleCount = static_cast<int>(input.rows());
  fx_.resize(sampleCount);
  for (int row = 0; row < sampleCount; ++row)
  {
    fx_(row) = CostFunc(input, output, params, row);
  }
}

double LeastSquare::Deriv(
    const Eigen::VectorXd &input, const Eigen::VectorXd &output, int index, const Eigen::VectorXd &params, int p_index)
{
  // Central finite difference of the residual w.r.t. parameter `p_index`:
  //   d r / d p  ~=  (r(p + h) - r(p - h)) / (2h)
  Eigen::VectorXd minus = params;
  Eigen::VectorXd plus  = params;
  minus(p_index) -= DERIV_STEP;
  plus(p_index) += DERIV_STEP;

  const double lower = CostFunc(input, output, minus, index);
  const double upper = CostFunc(input, output, plus, index);

  return (upper - lower) / (2.0 * DERIV_STEP);
}

void LeastSquare::Jacobian(const Eigen::VectorXd &input, const Eigen::VectorXd &output, const Eigen::VectorXd &params)
{
  // J_(i, j) = d r_i / d p_j, evaluated numerically via central differences.
  const int rows = static_cast<int>(input.rows());
  const int cols = static_cast<int>(params.rows());
  J_.resize(rows, cols);

  for (int i = 0; i < rows; ++i)
    for (int j = 0; j < cols; ++j)
      J_(i, j) = Deriv(input, output, i, params, j);
}

void LeastSquare::GaussNewton(const Eigen::VectorXd &input, const Eigen::VectorXd &output, Eigen::VectorXd &params)
{
  // Classic Gauss-Newton: repeatedly solve the normal equations
  //   (J^T J) * delta = J^T f
  // and update params -= delta, until the objective stops decreasing or
  // MAX_ITER iterations have run. `params` is updated in place.
  const int paraNum  = static_cast<int>(params.rows()); // parameter num
  double    last_sum = 0;
  int       iterCnt  = 0;

  while (iterCnt < MAX_ITER)
  {
    ObjF(input, output, params);
    double sum = Func(fx_);

    std::cout << "Iterator index: " << iterCnt << std::endl;
    std::cout << "parameter: " << std::endl << params << std::endl;
    std::cout << "error sum: " << std::endl << sum << std::endl << std::endl;

    // Converged: the objective no longer changes between iterations.
    if (std::fabs(sum - last_sum) <= 1e-12)
      break;
    last_sum = sum;

    Jacobian(input, output, params);
    // Solve the normal equations with an LDL^T factorization instead of
    // forming the explicit inverse: cheaper and numerically more stable
    // (J^T J is symmetric positive semi-definite).
    Eigen::VectorXd delta(paraNum);
    delta = (J_.transpose() * J_).ldlt().solve(J_.transpose() * fx_);

    params -= delta;
    iterCnt++;
  }
}

double LeastSquare::MaxMatrixDiag(const Eigen::MatrixXd &Hessian)
{
  // Largest diagonal entry of the (approximate) Hessian; used to seed the
  // LM damping factor u = tao * max(diag(J^T J)).
  // BUG FIX: the accumulator was declared `int`, silently truncating the
  // double-valued diagonal entries (e.g. a diagonal of 0.9 yielded 0).
  double maxDiag = 0.0;
  for (int i = 0; i < Hessian.rows(); i++)
  {
    if (Hessian(i, i) > maxDiag)
      maxDiag = Hessian(i, i);
  }

  return maxDiag;
}

double LeastSquare::DeltaL(const Eigen::VectorXd &h)
{
  // Predicted gain of the linearized model for step h:
  //   L(0) - L(h) = -h^T J^T f - 0.5 * h^T J^T (J h)
  // Eigen yields a 1x1 matrix for this product chain; return its scalar.
  const Eigen::MatrixXd gain =
      -h.transpose() * J_.transpose() * fx_ - 0.5 * h.transpose() * J_.transpose() * J_ * h;
  return gain(0, 0);
}

void LeastSquare::LM(const Eigen::VectorXd &input, const Eigen::VectorXd &output, Eigen::VectorXd &params)
{
  // Levenberg-Marquardt (Madsen/Nielsen/Tingleff formulation): damped
  // Gauss-Newton where the damping factor u is adapted from the gain ratio
  // roi = (actual decrease) / (predicted decrease). `params` is updated
  // in place.
  const int paraNum = static_cast<int>(params.rows()); // parameter num

  // Initial residuals, Jacobian, Hessian approximation and gradient.
  ObjF(input, output, params);
  Jacobian(input, output, params);
  Eigen::MatrixXd A        = J_.transpose() * J_;  // Hessian approximation
  Eigen::VectorXd gradient = J_.transpose() * fx_; // gradient

  // Damping seed and stopping tolerances (tao, v, eps1, eps2).
  double    tao  = 1e-3;
  long long v    = 2;
  double    eps1 = 1e-12, eps2 = 1e-12;
  double    u = tao * MaxMatrixDiag(A);
  if (gradient.norm() <= eps1)
    return; // already at a stationary point

  int iterCnt = 0;

  while (iterCnt < MAX_ITER)
  {
    ObjF(input, output, params);
    Jacobian(input, output, params);
    A        = J_.transpose() * J_;
    gradient = J_.transpose() * fx_;

    if (gradient.norm() <= eps1)
    {
      std::cout << "stop g(x) = 0 for a local minimizer optimizer." << std::endl;
      break;
    }

    std::cout << "A: " << std::endl << A << std::endl;

    // Damped normal equations: (A + u*I) * step = gradient. `step` is the
    // NEGATIVE of the LM step h_lm. A + u*I is SPD for u > 0, so use LDL^T
    // instead of the explicit inverse.
    Eigen::VectorXd step =
        (A + u * Eigen::MatrixXd::Identity(paraNum, paraNum)).ldlt().solve(gradient);

    std::cout << "step: " << std::endl << step << std::endl;

    if (step.norm() <= eps2 * (params.norm() + eps2))
    {
      std::cout << "stop because change in x is small" << std::endl;
      break;
    }

    Eigen::VectorXd paramsNew = params - step; // h_lm = -step

    // f(x): fx_ already holds the residuals at `params` (computed at the top
    // of this iteration; Jacobian() does not modify fx_), so no extra
    // ObjF call is needed here.
    Eigen::VectorXd fx = fx_;

    // f(x_new)
    ObjF(input, output, paramsNew);
    Eigen::VectorXd fx_new = fx_;

    // Gain ratio: actual objective decrease vs. model-predicted decrease.
    double deltaF = Func(fx) - Func(fx_new);
    double deltaL = DeltaL(-1 * step);

    double roi = deltaF / deltaL;
    std::cout << "roi is : " << roi << std::endl;
    if (roi > 0)
    {
      // Step accepted: move, and relax the damping toward Gauss-Newton.
      params = paramsNew;
      u *= std::max(1.0 / 3.0, 1 - std::pow(2 * roi - 1, 3));
      v = 2;
    } else
    {
      // Step rejected: increase damping (behaves more like gradient descent).
      u = u * v;
      v = v * 2;
    }

    std::cout << "u = " << u << " v = " << v << std::endl;

    iterCnt++;
    std::cout << "Iterator " << iterCnt << " times, result is :" << std::endl << std::endl;
  }
}

void LeastSquare::DogLeg(const Eigen::VectorXd &input, const Eigen::VectorXd &output, Eigen::VectorXd &params)
{
  // Powell's dog-leg: blend the steepest-descent (Cauchy) step and the
  // Gauss-Newton step inside a trust region of radius `radius`
  // (Madsen/Nielsen/Tingleff, "Methods for Non-Linear Least Squares
  // Problems", sec. 3.3). `params` is updated in place.
  ObjF(input, output, params);
  Jacobian(input, output, params);
  Eigen::VectorXd gradient = J_.transpose() * fx_; // gradient of F = 0.5*||f||^2

  // Stopping tolerances and initial trust-region radius.
  double eps1 = 1e-12, eps2 = 1e-12, eps3 = 1e-12;
  double radius = 1.0;

  if (fx_.norm() <= eps3 || gradient.norm() <= eps1)
    return; // already converged

  int iterCnt = 0;
  while (iterCnt < MAX_ITER)
  {
    ObjF(input, output, params);
    Jacobian(input, output, params);
    gradient = J_.transpose() * fx_;

    if (gradient.norm() <= eps1)
    {
      std::cout << "stop F'(x) = g(x) = 0 for a global minimizer optimizer." << std::endl;
      break;
    }
    if (fx_.norm() <= eps3)
    {
      std::cout << "stop f(x) = 0 for f(x) is so small" << std::endl;
      break;
    }

    // Optimal step length along -gradient: alpha = ||g||^2 / ||J g||^2.
    double alpha = gradient.squaredNorm() / (J_ * gradient).squaredNorm();
    // Cauchy step (alpha already folded in) and Gauss-Newton step; solve the
    // normal equations via LDL^T rather than an explicit inverse.
    Eigen::VectorXd stepest_descent = -alpha * gradient;
    Eigen::VectorXd gauss_newton    = -(J_.transpose() * J_).ldlt().solve(J_.transpose() * fx_);

    double beta = 0;

    // Dog-leg step selection.
    // BUG FIX: `stepest_descent` already contains the factor alpha, so the
    // original `alpha * stepest_descent` expressions scaled the Cauchy step
    // by alpha TWICE, corrupting both the boundary test and the
    // interpolation point of the dog-leg path.
    Eigen::VectorXd dog_leg(params.rows());
    if (gauss_newton.norm() <= radius)
    {
      // The full Gauss-Newton step fits inside the trust region.
      dog_leg = gauss_newton;
    } else if (stepest_descent.norm() >= radius)
    {
      // Even the Cauchy step leaves the region: clip it to the boundary.
      dog_leg = (radius / stepest_descent.norm()) * stepest_descent;
    } else
    {
      // Walk the dog-leg path: take the Cauchy step a, then move toward the
      // GN step b until the boundary is hit; beta solves ||a + beta*(b-a)|| = radius.
      const Eigen::VectorXd &a = stepest_descent;
      const Eigen::VectorXd &b = gauss_newton;
      double                 c = a.transpose() * (b - a);
      beta = (sqrt(c * c + (b - a).squaredNorm() * (radius * radius - a.squaredNorm())) - c) / (b - a).squaredNorm();

      dog_leg = a + beta * (b - a);
    }

    std::cout << "dog-leg: " << std::endl << dog_leg << std::endl;

    if (dog_leg.norm() <= eps2 * (params.norm() + eps2))
    {
      std::cout << "stop because change in x is small" << std::endl;
      break;
    }

    Eigen::VectorXd new_params = params + dog_leg;

    std::cout << "new parameter is: " << std::endl << new_params << std::endl;

    // f(x): fx_ already holds the residuals at `params` (computed at the top
    // of this iteration; Jacobian() does not modify fx_).
    Eigen::VectorXd fx = fx_;

    // f(x_new)
    ObjF(input, output, new_params);
    Eigen::VectorXd fx_new = fx_;

    // Actual decrease: deltaF = F(x) - F(x_new).
    double deltaF = Func(fx) - Func(fx_new);

    // Predicted decrease: deltaL = L(0) - L(dog_leg).
    // BUG FIX: the original used Func(fx_) here, but fx_ was just overwritten
    // with the residuals at new_params; L(0) must be F at the CURRENT x,
    // i.e. Func(fx). `beta` is reused from the matching branch above.
    double deltaL = 0;
    if (gauss_newton.norm() <= radius)
      deltaL = Func(fx);
    else if (stepest_descent.norm() >= radius)
      deltaL = radius * (2 * alpha * gradient.norm() - radius) / (2.0 * alpha);
    else
      deltaL = alpha * (1 - beta) * (1 - beta) * gradient.squaredNorm() / 2.0 + beta * (2.0 - beta) * Func(fx);

    double roi = deltaF / deltaL;
    if (roi > 0)
    {
      params = new_params; // model agreed with reality: accept the step
    }
    // Trust-region update: grow on very good agreement, shrink on poor.
    if (roi > 0.75)
    {
      radius = std::max(radius, 3.0 * dog_leg.norm());
    } else if (roi < 0.25)
    {
      radius = radius / 2.0;
      if (radius <= eps2 * (params.norm() + eps2))
      {
        std::cout << "trust region radius is too small." << std::endl;
        break;
      }
    }

    std::cout << "roi: " << roi << " dog-leg norm: " << dog_leg.norm() << std::endl;
    std::cout << "radius: " << radius << std::endl;

    iterCnt++;
    std::cout << "Iterator " << iterCnt << " times" << std::endl << std::endl;
  }
}