#pragma once

#include "../core/i_alloc.h"
#include "../numeric/i_poly.h"
#include "../numeric/i_nullspace.h"
#include "../optimize/i_lm.h"
#include "i_rot.h"
#include "i_util.h"

namespace idl
{
  /*Constraints on a camera P matrix such that x~=PX.
  These functions give the constraints from the rows of [x]_x*P*X*/
  template<typename T>
  inline void i_six_point_constraint_r0(const T x[3], const T X[4], T v[12])
  {
    /*row 0 of [x]_x*P*X as a 12-vector: [0 | -x2*X^T | x1*X^T]*/
    const T a = -x[2];
    const T b =  x[1];
    for (int i = 0; i < 4; ++i)
    {
      v[i]     = (T)0.0;
      v[i + 4] = a * X[i];
      v[i + 8] = b * X[i];
    }
  }

  template<typename T>
  inline void i_six_point_constraint_r1(const T x[3], const T X[4], T v[12])
  {
    /*row 1 of [x]_x*P*X as a 12-vector: [x2*X^T | 0 | -x0*X^T]*/
    const T a =  x[2];
    const T b = -x[0];
    for (int i = 0; i < 4; ++i)
    {
      v[i]     = a * X[i];
      v[i + 4] = (T)0.0;
      v[i + 8] = b * X[i];
    }
  }

  template<typename T>
  inline void i_six_point_constraint_r2(const T x[3], const T X[4], T v[12])
  {
    /*row 2 of [x]_x*P*X as a 12-vector: [-x1*X^T | x0*X^T | 0]*/
    const T a = -x[1];
    const T b =  x[0];
    for (int i = 0; i < 4; ++i)
    {
      v[i]     = a * X[i];
      v[i + 4] = b * X[i];
      v[i + 8] = (T)0.0;
    }
  }

  template<typename T>
  inline void i_six_point_constraint(const T x[3], const T X[4], T v1[12], T v2[12])
  {
    /*emit the two constraints from the rows of [x]_x*P*X whose index differs
    from that of the largest-magnitude coordinate of x (those two rows both
    contain the largest coordinate and are numerically the most stable).
    Ties resolve towards the lower row index, matching the original nesting.*/
    const T a0 = i_abs(x[0]);
    const T a1 = i_abs(x[1]);
    const T a2 = i_abs(x[2]);
    int largest = 0;
    T peak = a0;
    if (a1 > peak)
    {
      largest = 1;
      peak = a1;
    }
    if (a2 > peak)
    {
      largest = 2;
    }
    switch (largest)
    {
    case 0:
      i_six_point_constraint_r1(x, X, v1);
      i_six_point_constraint_r2(x, X, v2);
      break;
    case 1:
      i_six_point_constraint_r0(x, X, v1);
      i_six_point_constraint_r2(x, X, v2);
      break;
    default:
      i_six_point_constraint_r0(x, X, v1);
      i_six_point_constraint_r1(x, X, v2);
      break;
    }
  }

  template<typename T>
  inline void i_six_point_constraint(const T x[3], const T X[4], T v[12])
  {
    /*emit the single constraint from the row of [x]_x*P*X corresponding to the
    smallest-magnitude coordinate of x; that row is built from the two largest
    coordinates and is therefore the most stable single constraint.
    Ties resolve towards the higher row index, matching the original nesting.*/
    const T a0 = i_abs(x[0]);
    const T a1 = i_abs(x[1]);
    const T a2 = i_abs(x[2]);
    int smallest = 2;
    T low = a2;
    if (a1 < low)
    {
      smallest = 1;
      low = a1;
    }
    if (a0 < low)
    {
      smallest = 0;
    }
    if (smallest == 0)
    {
      i_six_point_constraint_r0(x, X, v);
    }
    else if (smallest == 1)
    {
      i_six_point_constraint_r1(x, X, v);
    }
    else
    {
      i_six_point_constraint_r2(x, X, v);
    }
  }

  /*Solve the six point perspective camera resectioning problem:
  compute a camera matrix P such that x~=PX given 6 point correspondences x<->X.
  The points are given as triplets (x) and quadruplets (X) of homogeneous
  coordinates stored consecutively in xs and Xs.
  The caller is responsible for normalizing the homogeneous coordinates to
  something well conditioned; the image coordinate system matters most, but a
  reasonable world coordinate system and a reasonable scale of the homogeneous
  coordinates are also advisable.
  null_space_method selects how the nullspace is found:
   1: SVD, slower but more accurate
   any other value (including the default 0): LU, faster but less accurate*/
  template<typename T> inline void i_six_point_P_matrix(const T xs[18], const T Xs[24], T P[12], int null_space_method = 0)
  {
    T A[132]; /*11x12 design matrix, row-major*/
    /*5.5 point constraints: the first five correspondences contribute two rows
    each, the sixth contributes a single row, 11 rows in total*/
    i_six_point_constraint(xs,      Xs,      A,      A + 12);
    i_six_point_constraint(xs + 3,  Xs + 4,  A + 24, A + 36);
    i_six_point_constraint(xs + 6,  Xs + 8,  A + 48, A + 60);
    i_six_point_constraint(xs + 9,  Xs + 12, A + 72, A + 84);
    i_six_point_constraint(xs + 12, Xs + 16, A + 96, A + 108);
    i_six_point_constraint(xs + 15, Xs + 20, A + 120);
    if (null_space_method == 1)
    {
      i_nullspace_svd_11x12(A, P);
    }
    else
    {
      i_nullspace_lu_11x12_destroy(A, P);
    }
  }

  /*This routine solves the three point perspective pose problem (P3P).
  The input normalized (applied K^-1) image points {x1, x2, x3} should be in homogeneous coordinates.
  The input world points {p1, p2, p3} should be in inhomogeneous coordinates.
  Pose (up to 4 solutions) is output in P as consecutive 3x4 [R|t] matrices, 12 entries each.
  Number of solutions will be returned (0 if the quartic has no real root).*/
  template<typename T> inline int i_three_point_pose(const T x1[3], const T x2[3], const T x3[3],
                                                     const T p1[3], const T p2[3], const T p3[3], T P[48])
  {
    int nr_solutions = 0;
    i_zero(P, 48);
    /*unitized direction vectors of image points. 
      direction vectors should point in the actual forward direction towards the point*/
    T d1[3], d2[3], d3[3];
    i_unitize3(x1, d1);
    i_unitize3(x2, d2);
    i_unitize3(x3, d3);
    /*translated space points pp2, pp3 and vector product pp4=pp2 x pp3 stored in matrix LU*/
    T LU[9];
    /*coefficients of quartic polynomial*/
    T D[5];
    /*solutions of the quartic polynomial, up to 4 roots*/
    T roots[4];
    /*variables*/
    T u, u_sqr, rhs1, rhs2, s1, s2, s3, temp;
    T two_a2_min_b2_cos_d1d2, two_c2, two_c2_cos_d2d3, two_c2_cos_d1d3, a2_cos_d1d3, b2_cos_d2d3, coeff_u, min_coeff_u2;
    T s1d1[3], s2d2[3], s3d3[3], r1[3], r2[3], r3[3];
    T XP[9];
    /*camera matrix pointer*/
    T *P_ref;
    /*Routine computes quartic polynomial and solves it using Merritt's method. 
      The leg-lengths are then computed from the roots and a vector
      product method is then used to find R and t. For labels and equations for finding
      the leg-lengths, see reference Haralick, Lee, Ottenberg, Nolle, 'Review and
      Analysis of Solutions of the Three Point Perspective Pose Estimation Problem', IJCV 13,3,331-356(1994).
      Once the leg-lengths have been found that gives the points in the camera coordinate
      system. The world points are put with the first point at the origin and the same
      for the points in the camera coordinate system. The second and third point
      plus their cross product is computed. Solving with two points in the camera
      coordinate system as right hand sides using Cramers rule gives the first two rows
      of R corresponding to each solution. The last row is then found by cross product*/
    /*compute the cosines between image direction vectors*/
    T cos_d2d3 = i_dot3(d2, d3);
    T cos_d1d3 = i_dot3(d1, d3);
    T cos_d1d2 = i_dot3(d1, d2);
    /*compute squares of cosines*/
    T cos_d2d3_sqr = i_sqr(cos_d2d3);
    T cos_d1d3_sqr = i_sqr(cos_d1d3);
    T cos_d1d2_sqr = i_sqr(cos_d1d2);
    T cos_d2d3_cos_d1d3 = cos_d2d3*cos_d1d3;
    /*Compute pp2 and pp3, that is space points
    translated -p1 so that pp1 would be at the origin.*/
    i_sub3(p2, p1, LU);
    i_sub3(p3, p1, LU+3);
    /*compute squared distances between space points*/
    T sd_p2p3 = i_squaresum_diff3(p2, p3);
    T sd_p1p3 = i_squaresum3(LU+3);
    T sd_p1p2 = i_squaresum3(LU);
    /*compute common scale factors*/
    T a2_c2 = sd_p2p3*sd_p1p2;
    T b2_c2 = sd_p1p3*sd_p1p2;
    T a2_min_b2 = sd_p2p3 - sd_p1p3;
    T a2_min_c2 = sd_p2p3 - sd_p1p2;
    T b2_min_c2 = sd_p1p3 - sd_p1p2;
    T a2_plus_b2 = sd_p2p3 + sd_p1p3;
    T a2_min_b2_min_c2  = a2_min_b2 - sd_p1p2;
    T a2_min_b2_plus_c2 = a2_min_b2 + sd_p1p2;
    T a2_plus_b2_min_c2 = a2_plus_b2 - sd_p1p2;
    T two_cos_d1d2 = cos_d1d2 * (T)(2.0);
    /*Compute coefficients of quartic polynomial: D4u^4+D3u^3+D2u^2+D1u+D0*/
    D[0] = a2_c2*cos_d1d3_sqr - a2_min_b2_plus_c2*a2_min_b2_plus_c2*(T)(0.25);
    D[1] = (a2_min_b2*a2_min_b2_plus_c2 - a2_c2*cos_d1d3_sqr*(T)(2.0))*cos_d1d2 - (sd_p1p2*b2_min_c2 + a2_c2)*cos_d2d3_cos_d1d3;
    D[2] = sd_p1p2*(a2_min_c2*cos_d1d3_sqr
           + a2_plus_b2*cos_d2d3_cos_d1d3*two_cos_d1d2
           + b2_min_c2*cos_d2d3_sqr)
           - a2_min_b2*a2_min_b2*cos_d1d2_sqr
           - a2_min_b2_min_c2*a2_min_b2_plus_c2*(T)(0.5);
    D[3] = (a2_min_b2_min_c2*a2_min_b2 - b2_c2*cos_d2d3_sqr*(T)(2.0))*cos_d1d2
           - sd_p1p2*a2_plus_b2_min_c2*cos_d2d3_cos_d1d3;
    D[4] = b2_c2*cos_d2d3_sqr - a2_min_b2_min_c2*a2_min_b2_min_c2*(T)(0.25);
    /*solve the polynomial equation*/
    nr_solutions = i_quartic_solve_closed(D, roots);
  
    if (nr_solutions)
    {
      i_cross(LU, LU+3, LU+6);
      /*compute common terms used in the loop over roots below
      in first equation for determining third leg-length*/
      two_a2_min_b2_cos_d1d2 = two_cos_d1d2*a2_min_b2;
      two_c2 = sd_p1p2 * (T)2.0;
      two_c2_cos_d2d3 = two_c2*cos_d2d3;
      two_c2_cos_d1d3 = two_c2*cos_d1d3;
      /*compute common terms used in the loop over roots below
      in second equation for determining third leg-length*/
      a2_cos_d1d3 = sd_p2p3*cos_d1d3;
      b2_cos_d2d3 = sd_p1p3*cos_d2d3;
      coeff_u = a2_cos_d1d3*two_cos_d1d2 + b2_min_c2*cos_d2d3;
      min_coeff_u2 = a2_min_c2*cos_d1d3 + b2_cos_d2d3*two_cos_d1d2;
      /*Loop over the roots, up to four roots*/
      for (int i = 0; i<nr_solutions; i++)
      {
        u = roots[i];
        u_sqr = i_sqr(u);
        /*determine the first two leg-lengths*/
        temp = (T)1.0 + u_sqr - u*two_cos_d1d2;
        /*guard against a non-positive value caused by numerical roundoff*/
        temp = (temp >= (T)0.0) ? temp : (T)1.0;
        s1 = i_sqrt( i_div(sd_p1p2, temp) );
        s2 = u*s1;
        rhs1 = u*two_a2_min_b2_cos_d1d2 - u_sqr*a2_min_b2_min_c2 - a2_min_b2_plus_c2;
        rhs2 = (T)(2.0)*(a2_cos_d1d3 - u_sqr*(u*b2_cos_d2d3 - min_coeff_u2) - u*coeff_u);
        /*select the most numerically stable method to determine the third leg-length*/
        if (i_abs(rhs1) >= i_abs(rhs2))
        {
          temp = (u*two_c2_cos_d2d3 - two_c2_cos_d1d3);
          temp = (temp != (T)0.0) ? temp : (T)1.0; /*avoid division by zero*/
          s3 = i_div(s1*rhs1, temp);
        }
        else
        {
          temp = (u_sqr*a2_min_b2_min_c2 - u*two_a2_min_b2_cos_d1d2 + a2_min_b2_plus_c2);
          temp = (temp != (T)0.0) ? temp : (T)1.0; /*avoid division by zero*/
          s3 = i_div(s1*rhs2, temp);
        }
        P_ref = P + 12*i; /*each solution occupies 12 consecutive entries of P*/
        i_scale3(d1, s1d1, s1);
        i_scale3(d2, s2d2, s2);
        i_scale3(d3, s3d3, s3);
        i_sub3(s2d2, s1d1, XP);
        i_sub3(s3d3, s1d1, XP+3);
        i_cross(XP,  XP+3, XP+6);
        i_transpose_3x3(XP);
        i_solve_3x3(LU, XP,   r1);
        i_solve_3x3(LU, XP+3, r2);
        i_cross(r1, r2, r3);
        i_copy3(r1, P_ref);
        i_copy3(r2, P_ref+4);
        i_copy3(r3, P_ref+8);
        /*translation: t = s1*d1 - R*p1, i.e. p1 in the camera frame minus the rotated world point*/
        P_ref[3]  = s1d1[0] - i_dot3(r1, p1); 
        P_ref[7]  = s1d1[1] - i_dot3(r2, p1); 
        P_ref[11] = s1d1[2] - i_dot3(r3, p1); 
      }
    }
    return nr_solutions;
  }

  /*This routine solves the three point perspective pose problem with a fourth point to differentiate the unique pose.
  The input normalized (applied K^-1) image points {x1, x2, x3, x4} should be in homogeneous coordinates.
  The input world points {p1, p2, p3, p4} should be in inhomogeneous coordinates.
  Number of solutions (0 or 1) will be returned.*/
  template<typename T> inline int i_three_point_unique_pose(const T x1[3], const T x2[3], const T x3[3], const T x4[3],
                                                            const T p1[3], const T p2[3], const T p3[3], const T p4[3], T P[12])
  {
    T Poses[48]; /*up to four candidate [R|t] matrices, 12 entries each*/
    const int nr_solutions = i_three_point_pose(x1, x2, x3, p1, p2, p3, Poses);
    if (nr_solutions == 0)
    {
      i_compose_canonical_normalized_P_matrix(P);
      return 0; /*no solution*/
    }
    /*disambiguate by the squared reprojection error of the fourth correspondence;
    earlier candidates win ties, as before*/
    int index_best = 0;
    if (nr_solutions > 1)
    {
      T rp_err_best = i_reproject_sqr_error_inhomogeneous(x4, Poses, p4);
      for (int i = 1; i < nr_solutions; ++i)
      {
        const T rp_err = i_reproject_sqr_error_inhomogeneous(x4, Poses + 12 * i, p4);
        if (rp_err < rp_err_best)
        {
          index_best = i;
          rp_err_best = rp_err;
        }
      }
    }
    /*assign the best P matrix*/
    i_copy12(Poses + index_best * 12, P);
    return 1;
  }

  /*RANSAC hypothesis generator: computes a unique pose from 4 stacked correspondences
    (3 for P3P plus 1 for disambiguation).
    The input normalized (applied K^-1) image points {x, x+3, x+6, x+9} should be in homogeneous coordinates.
    The input world points {p, p+3, p+6, p+9} should be in inhomogeneous coordinates.*/
  template<typename T>
  inline void i_p3p_ransac_hypogenfunc(const T x[12], const T p[12], T P[12])
  {
    const T *x2 = x + 3, *x3 = x + 6, *x4 = x + 9;
    const T *p2 = p + 3, *p3 = p + 6, *p4 = p + 9;
    i_three_point_unique_pose(x, x2, x3, x4, p, p2, p3, p4, P);
  }

  /*RANSAC cost function: projects the n world points p (stride 3) with P,
  compares against the image points x (stride 3, only x/y used), records the
  indices with reprojection error below error_tol in inliers, their count in
  nr_liner and the sum of their errors in cost.*/
  template<typename T>
  inline void i_p3p_ransac_costfunc(const T P[12], const T* x, const T* p, int n, int &nr_liner, int *inliers, T &cost, T error_tol)
  {
    nr_liner = 0;
    cost = (T)0.0;
    for (int i = 0; i < n; ++i)
    {
      const T *pi = p + 3 * i;
      const T *xi = x + 3 * i;
      /*project pi with P = [R|t] and subtract the observed image point*/
      const T w = i_dot3(P + 8, pi) + P[11];
      const T u = i_div(i_dot3(P,     pi) + P[3], w) - xi[0];
      const T v = i_div(i_dot3(P + 4, pi) + P[7], w) - xi[1];
      const T proj_err = i_sqrt(u*u + v*v);
      if (proj_err < error_tol)
      {
        inliers[nr_liner++] = i;
        cost += proj_err;
      }
    }
  }

  /*This class optimizes the camera pose with a set of 2D<->3D correspondences using the Levenberg-Marquardt algorithm.
  The input image points {xs} should be in inhomogeneous coordinates.
  The input world 3D points {ps} should be in inhomogeneous coordinates.
  This class assumes that camera intrinsics are known*/
  template<typename T>
  class CalibratedCameraPoseOptimizer
  {
  public:
    typedef T data_type;
    static const int _m = 6; /*number of unknowns: 3 Rodrigues rotation + 3 translation parameters*/
    static const int _d = 2; /*residual dimension per correspondence (x and y reprojection error)*/
    CalibratedCameraPoseOptimizer();
    ~CalibratedCameraPoseOptimizer();
    /*evaluates the _d*_n residual vector fvec at parameter vector c (called by the LM solver)*/
    void computeCost(const T *c, T *fvec) const;
    /*evaluates the (_d*_n) x _m Jacobian J at parameter vector c (called by the LM solver)*/
    void computeJacobian(const T *c, T *J) const;
    int getNumUnknowns() const { return _m; }
    int getNumberResiduals() const { return _d*_n; };
    /*runs the optimization; xs/ps are borrowed for the duration of the call, not copied*/
    bool optimize(const T K[9], const T P[12], const T *xs, const T *ps, int n);
    /*composes the optimized 3x4 [R|t] from the internal parameter vector*/
    void getPose(T P[12]) const;
    bool isFailed() const { return !_succeeded; }
    void setVerbose(bool verbose) { _verbose = verbose; }
  protected:
    T _k[9]; /*K matrix to model the camera intrinsics, always assume that K[1](skew) = 0.0*/
    T _c[_m]; /*6 parameters to model the camera pose [R|t]*/
    int _n; /*number of 2D<->3D correspondences*/
    const T *_ps; /*borrowed pointer to _n world points, 3 values each*/
    const T *_xs; /*borrowed pointer to _n image points, 2 values each*/
    bool _succeeded; /*outcome of the last optimize() call*/
    bool _verbose; /*forwarded to the LM solver*/
  };
  
  template<typename T>
  CalibratedCameraPoseOptimizer<T>::CalibratedCameraPoseOptimizer() : _n(0), _ps(NULL), _xs(NULL), _succeeded(false), _verbose(false)
  {
    /*start from zeroed intrinsics and pose parameters; optimize() fills them in*/
    i_zero9(_k);
    i_zero(_c, _m);
  }

  template<typename T>
  CalibratedCameraPoseOptimizer<T>::~CalibratedCameraPoseOptimizer(){} /*nothing owned: _ps/_xs are borrowed pointers*/
  
  template<typename T>
  void CalibratedCameraPoseOptimizer<T>::getPose(T P[12]) const
  {
    /*convert the internal 6-vector (Rodrigues rotation + translation) back into a 3x4 [R|t]*/
    T rotation[9];
    T translation[3];
    i_rot_rodrigues_3x3(_c, rotation);
    i_copy3(_c + 3, translation);
    i_compose_normalized_P_matrix_from_R_t(rotation, translation, P);
  }

  /*Runs LM pose refinement from the initial pose P with intrinsics K over the n
  correspondences xs (2 values each) <-> ps (3 values each).
  Returns true on success; false on invalid input or allocation failure.*/
  template<typename T>
  bool CalibratedCameraPoseOptimizer<T>::optimize(const T K[9], const T P[12]/*initial normalized P matrix [R|t]*/, const T* xs, const T* ps, const int n/*# of 2D<->3D correspondences*/)
  {
    T R[9], t[3], theta;
    _succeeded = false;
    /*reject non-positive counts as well: a negative n would otherwise reach i_alloc with a negative size*/
    if (n <= 0 || xs == NULL || ps == NULL)
    {
      return false;
    }
    /*alloc error vector fvec*/
    T* fvec = i_alloc<T>(n*_d);
    if (fvec == NULL)
    {
      return false;
    }
    _n = n;
    _xs = xs;
    _ps = ps;
    /*initialize camera parameters: intrinsics plus the 6-parameter pose vector
    obtained by inverting Rodrigues on the initial rotation*/
    i_copy9(K, _k);
    i_decompose_R_t_from_normalized_P_matrix(P, R, t);
    i_rot_invert_rodrigues_3x3(R, _c, theta);
    i_copy3(t, _c+3);
  
    LevenbergMarquardt solver;
    solver.setVerbose(_verbose);
    _succeeded = solver.optimize(*this, _c, fvec);
    i_free(fvec);
    return _succeeded;
  }

  template<typename T>
  void CalibratedCameraPoseOptimizer<T>::computeCost(const T *c, T* fvec) const
  {
    /*evaluate the _d*_n reprojection residuals at parameter vector c*/
    T R[9], t[3];
    i_rot_rodrigues_3x3(c, R);
    i_copy3(c + 3, t);
    for (int i = 0; i < _n; ++i)
    {
      /*image points have 2 values each, world points 3, residuals _d*/
      i_reproject_vec_error_inhomogeneous(_xs + 2 * i, _k, R, t, _ps + 3 * i, fvec + _d * i);
    }
  }

  template<typename T>
  void CalibratedCameraPoseOptimizer<T>::computeJacobian(const T *c, T* J) const
  {
    /*J is 2*_n x 6*/
    /*each row pair is the quotient-rule derivative of the projection
    (fx*P1/P3, fy*P2/P3) with respect to the 6 pose parameters*/
    int i, j_offset = _m * _d;
    T R[9], t[3], Rx[3], P[3], D[9][3]; 
    T P1, P2, P3, P3P3_rec, P1_dc[6], P2_dc[6], P3_dc[6];
    T X0, X1, X2, sfx, sfy;
    T fx = _k[0];
    T fy = _k[4];

    const T *cptr_X = _ps;
    T* ptr_J = J;
    /*D holds the derivatives of the rotation matrix entries wrt the 3 Rodrigues parameters*/
    i_rot_rodrigues_3x3(c, R, D);
    i_copy3(c + 3, t);

    for (i = 0; i < _n; ++i, cptr_X += 3, ptr_J += j_offset)
    {
      /*P = R*X + t, the world point in the camera frame*/
      i_mult_Ax_3x3(R, cptr_X, Rx);
      i_add3(Rx, t, P);
      /*precompute variables*/
      X0 = cptr_X[0];
      X1 = cptr_X[1];
      X2 = cptr_X[2];
      P1 = P[0];
      P2 = P[1];
      P3 = P[2];
      P3P3_rec = i_rec(i_sqr(P3));
      sfx = P3P3_rec*fx;
      sfy = P3P3_rec*fy;
      /*compute derivatives wrt v1, v2, v3, t1, t2, and t3*/
      P1_dc[0] = D[0][0] * X0 + D[1][0] * X1 + D[2][0] * X2; /*derivative of v1*/
      P1_dc[1] = D[0][1] * X0 + D[1][1] * X1 + D[2][1] * X2; /*derivative of v2*/
      P1_dc[2] = D[0][2] * X0 + D[1][2] * X1 + D[2][2] * X2; /*derivative of v3*/
      P1_dc[3] = (T)1.0; /*derivative of t1*/
      P1_dc[4] = (T)0.0; /*derivative of t2*/
      P1_dc[5] = (T)0.0; /*derivative of t3*/
    
      P2_dc[0] = D[3][0] * X0 + D[4][0] * X1 + D[5][0] * X2; /*derivative of v1*/
      P2_dc[1] = D[3][1] * X0 + D[4][1] * X1 + D[5][1] * X2; /*derivative of v2*/
      P2_dc[2] = D[3][2] * X0 + D[4][2] * X1 + D[5][2] * X2; /*derivative of v3*/
      P2_dc[3] = (T)0.0; /*derivative of t1*/
      P2_dc[4] = (T)1.0; /*derivative of t2*/
      P2_dc[5] = (T)0.0; /*derivative of t3*/
    
      P3_dc[0] = D[6][0] * X0 + D[7][0] * X1 + D[8][0] * X2; /*derivative of v1*/
      P3_dc[1] = D[6][1] * X0 + D[7][1] * X1 + D[8][1] * X2; /*derivative of v2*/
      P3_dc[2] = D[6][2] * X0 + D[7][2] * X1 + D[8][2] * X2; /*derivative of v3*/
      P3_dc[3] = (T)0.0; /*derivative of t1*/
      P3_dc[4] = (T)0.0; /*derivative of t2*/
      P3_dc[5] = (T)1.0; /*derivative of t3*/
      
      /*dx/dc*/
      ptr_J[0] = (P1_dc[0]*P3 - P1*P3_dc[0])*sfx;
      ptr_J[1] = (P1_dc[1]*P3 - P1*P3_dc[1])*sfx;
      ptr_J[2] = (P1_dc[2]*P3 - P1*P3_dc[2])*sfx;
      ptr_J[3] = (P1_dc[3]*P3 - P1*P3_dc[3])*sfx;
      ptr_J[4] = (P1_dc[4]*P3 - P1*P3_dc[4])*sfx;
      ptr_J[5] = (P1_dc[5]*P3 - P1*P3_dc[5])*sfx;
      /*dy/dc*/
      ptr_J[6]  = (P2_dc[0]*P3 - P2*P3_dc[0])*sfy;
      ptr_J[7]  = (P2_dc[1]*P3 - P2*P3_dc[1])*sfy;
      ptr_J[8]  = (P2_dc[2]*P3 - P2*P3_dc[2])*sfy;
      ptr_J[9]  = (P2_dc[3]*P3 - P2*P3_dc[3])*sfy;
      ptr_J[10] = (P2_dc[4]*P3 - P2*P3_dc[4])*sfy;
      ptr_J[11] = (P2_dc[5]*P3 - P2*P3_dc[5])*sfy;
    }
  }
  //////////////////////////////////////////////////////////////////////////////////////////////////

  /*This class optimizes the camera pose with a set of 2D<->3D correspondences using the Levenberg-Marquardt algorithm.
  The input "normalized" image points {xs} "K^-1 applied" should be in inhomogeneous coordinates.
  The input world 3D points {ps} should be in inhomogeneous coordinates.*/
  template<typename T>
  class CalibratedCameraPoseOptimizerNormalized
  {
  public:
    typedef T data_type;
    static const int _m = 6; /*number of unknowns: 3 Rodrigues rotation + 3 translation parameters*/
    static const int _d = 2; /*residual dimension per correspondence (x and y reprojection error)*/
    CalibratedCameraPoseOptimizerNormalized();
    ~CalibratedCameraPoseOptimizerNormalized();
    /*evaluates the _d*_n residual vector fvec at parameter vector c (called by the LM solver)*/
    void computeCost(const T *c, T *fvec) const;
    /*evaluates the (_d*_n) x _m Jacobian J at parameter vector c (called by the LM solver)*/
    void computeJacobian(const T *c, T *J) const;
    int getNumUnknowns() const { return _m; }
    int getNumberResiduals() const { return _d*_n; };
    /*runs the optimization; xs/ps are borrowed for the duration of the call, not copied*/
    bool optimize(const T P[12], const T *xs, const T *ps, int n);
    /*composes the optimized 3x4 [R|t] from the internal parameter vector*/
    void getPose(T P[12]) const;
    bool isFailed() const { return !_succeeded; }
    void setVerbose(bool verbose) { _verbose = verbose; }
  protected:
    T _c[_m]; /*6 parameters to model the camera pose [R|t]*/
    int _n; /*number of 2D<->3D correspondences*/
    const T *_ps; /*borrowed pointer to _n world points, 3 values each*/
    const T *_xs; /*borrowed pointer to _n normalized image points, 2 values each*/
    bool _succeeded; /*outcome of the last optimize() call*/
    bool _verbose; /*forwarded to the LM solver*/
  };

  template<typename T>
  CalibratedCameraPoseOptimizerNormalized<T>::CalibratedCameraPoseOptimizerNormalized() : _n(0), _ps(NULL), _xs(NULL), _succeeded(false), _verbose(false)
  {
    /*start from a zeroed pose parameter vector; optimize() fills it in*/
    i_zero(_c, _m);
  }

  template<typename T>
  CalibratedCameraPoseOptimizerNormalized<T>::~CalibratedCameraPoseOptimizerNormalized(){} /*nothing owned: _ps/_xs are borrowed pointers*/

  template<typename T>
  void CalibratedCameraPoseOptimizerNormalized<T>::getPose(T P[12]) const
  {
    /*convert the internal 6-vector (Rodrigues rotation + translation) back into a 3x4 [R|t]*/
    T rotation[9];
    T translation[3];
    i_rot_rodrigues_3x3(_c, rotation);
    i_copy3(_c + 3, translation);
    i_compose_normalized_P_matrix_from_R_t(rotation, translation, P);
  }

  /*Runs LM pose refinement from the initial pose P over the n correspondences
  xs (normalized image points, 2 values each) <-> ps (world points, 3 values each).
  Returns true on success; false on invalid input or allocation failure.*/
  template<typename T>
  bool CalibratedCameraPoseOptimizerNormalized<T>::optimize(const T P[12]/*initial normalized P matrix [R|t]*/, const T* xs, const T* ps, const int n/*# of 2D<->3D correspondences*/)
  {
    T R[9], t[3], theta;
    _succeeded = false;
    /*reject non-positive counts as well: a negative n would otherwise reach i_alloc with a negative size*/
    if (n <= 0 || xs == NULL || ps == NULL)
    {
      return false;
    }
    /*alloc error vector fvec*/
    T* fvec = i_alloc<T>(n*_d);
    if (fvec == NULL)
    {
      return false;
    }
    _n = n;
    _xs = xs;
    _ps = ps;
    /*initialize the 6-parameter pose vector by inverting Rodrigues on the initial rotation*/
    i_decompose_R_t_from_normalized_P_matrix(P, R, t);
    i_rot_invert_rodrigues_3x3(R, _c, theta);
    i_copy3(t, _c + 3);

    LevenbergMarquardt solver;
    solver.setVerbose(_verbose);
    _succeeded = solver.optimize(*this, _c, fvec);
    i_free(fvec);
    return _succeeded;
  }

  template<typename T>
  void CalibratedCameraPoseOptimizerNormalized<T>::computeCost(const T *c, T* fvec) const
  {
    /*evaluate the _d*_n reprojection residuals (normalized image coordinates) at parameter vector c*/
    T R[9], t[3];
    i_rot_rodrigues_3x3(c, R);
    i_copy3(c + 3, t);
    for (int i = 0; i < _n; ++i)
    {
      /*image points have 2 values each, world points 3, residuals _d*/
      i_reproject_vec_error_inhomogeneous(_xs + 2 * i, R, t, _ps + 3 * i, fvec + _d * i);
    }
  }

  template<typename T>
  void CalibratedCameraPoseOptimizerNormalized<T>::computeJacobian(const T *c, T* J) const
  {
    /*J is 2*_n x 6*/
    /*each row pair is the quotient-rule derivative of the normalized projection
    (P1/P3, P2/P3) with respect to the 6 pose parameters (no focal scaling here)*/
    int i, j_offset = _m *_d;
    T R[9], t[3], Rx[3], P[3], D[9][3];
    T P1, P2, P3, P3P3_rec, P1_dc[6], P2_dc[6], P3_dc[6];
    T X0, X1, X2;

    const T *cptr_X = _ps;
    T* ptr_J = J;
    /*D holds the derivatives of the rotation matrix entries wrt the 3 Rodrigues parameters*/
    i_rot_rodrigues_3x3(c, R, D);
    i_copy3(c + 3, t);

    for (i = 0; i < _n; ++i, cptr_X += 3, ptr_J += j_offset)
    {
      /*P = R*X + t, the world point in the camera frame*/
      i_mult_Ax_3x3(R, cptr_X, Rx);
      i_add3(Rx, t, P);
      /*precompute variables*/
      X0 = cptr_X[0];
      X1 = cptr_X[1];
      X2 = cptr_X[2];
      P1 = P[0];
      P2 = P[1];
      P3 = P[2];
      P3P3_rec = i_rec(i_sqr(P3));
      /*compute derivatives wrt v1, v2, v3, t1, t2, and t3*/
      P1_dc[0] = D[0][0] * X0 + D[1][0] * X1 + D[2][0] * X2; /*derivative of v1*/
      P1_dc[1] = D[0][1] * X0 + D[1][1] * X1 + D[2][1] * X2; /*derivative of v2*/
      P1_dc[2] = D[0][2] * X0 + D[1][2] * X1 + D[2][2] * X2; /*derivative of v3*/
      P1_dc[3] = (T)1.0; /*derivative of t1*/
      P1_dc[4] = (T)0.0; /*derivative of t2*/
      P1_dc[5] = (T)0.0; /*derivative of t3*/

      P2_dc[0] = D[3][0] * X0 + D[4][0] * X1 + D[5][0] * X2; /*derivative of v1*/
      P2_dc[1] = D[3][1] * X0 + D[4][1] * X1 + D[5][1] * X2; /*derivative of v2*/
      P2_dc[2] = D[3][2] * X0 + D[4][2] * X1 + D[5][2] * X2; /*derivative of v3*/
      P2_dc[3] = (T)0.0; /*derivative of t1*/
      P2_dc[4] = (T)1.0; /*derivative of t2*/
      P2_dc[5] = (T)0.0; /*derivative of t3*/

      P3_dc[0] = D[6][0] * X0 + D[7][0] * X1 + D[8][0] * X2; /*derivative of v1*/
      P3_dc[1] = D[6][1] * X0 + D[7][1] * X1 + D[8][1] * X2; /*derivative of v2*/
      P3_dc[2] = D[6][2] * X0 + D[7][2] * X1 + D[8][2] * X2; /*derivative of v3*/
      P3_dc[3] = (T)0.0; /*derivative of t1*/
      P3_dc[4] = (T)0.0; /*derivative of t2*/
      P3_dc[5] = (T)1.0; /*derivative of t3*/

      /*dx/dc*/
      ptr_J[0] = (P1_dc[0] * P3 - P1*P3_dc[0])*P3P3_rec;
      ptr_J[1] = (P1_dc[1] * P3 - P1*P3_dc[1])*P3P3_rec;
      ptr_J[2] = (P1_dc[2] * P3 - P1*P3_dc[2])*P3P3_rec;
      ptr_J[3] = (P1_dc[3] * P3 - P1*P3_dc[3])*P3P3_rec;
      ptr_J[4] = (P1_dc[4] * P3 - P1*P3_dc[4])*P3P3_rec;
      ptr_J[5] = (P1_dc[5] * P3 - P1*P3_dc[5])*P3P3_rec;
      /*dy/dc*/
      ptr_J[6] = (P2_dc[0] * P3 - P2*P3_dc[0])*P3P3_rec;
      ptr_J[7] = (P2_dc[1] * P3 - P2*P3_dc[1])*P3P3_rec;
      ptr_J[8] = (P2_dc[2] * P3 - P2*P3_dc[2])*P3P3_rec;
      ptr_J[9] = (P2_dc[3] * P3 - P2*P3_dc[3])*P3P3_rec;
      ptr_J[10] = (P2_dc[4] * P3 - P2*P3_dc[4])*P3P3_rec;
      ptr_J[11] = (P2_dc[5] * P3 - P2*P3_dc[5])*P3P3_rec;
    }
  }

  /*This class optimizes the camera pose and camera focal length with a set of 2D<->3D correspondences using the Levenberg-Marquardt algorithm.
  The input image points {xs} should be in inhomogeneous coordinates.
  The input world 3D points {ps} should be in inhomogeneous coordinates.
  This class assumes that two image focal lengths are identical, image centers are known and there is no image distortion*/
  template<typename T>
  class UnCalibratedCameraFocalPoseOptimizer
  {
  public:
    typedef T data_type;
    static const int _m = 7; /*number of unknowns: 3 Rodrigues rotation + 3 translation + 1 focal length*/
    static const int _d = 2; /*residual dimension per correspondence (x and y reprojection error)*/
    UnCalibratedCameraFocalPoseOptimizer();
    ~UnCalibratedCameraFocalPoseOptimizer();
    /*evaluates the _d*_n residual vector fvec at parameter vector c (called by the LM solver)*/
    void computeCost(const T *c, T *fvec) const;
    /*evaluates the (_d*_n) x _m Jacobian J at parameter vector c (called by the LM solver)*/
    void computeJacobian(const T *c, T *J) const;
    int getNumUnknowns() const { return _m; }
    int getNumberResiduals() const { return _d*_n; };
    /*runs the optimization; xs/ps are borrowed for the duration of the call, not copied*/
    bool optimize(const T K[9], const T P[12], const T *xs, const T *ps, int n);
    /*composes the optimized 3x4 [R|t] from the internal parameter vector*/
    void getPose(T P[12]) const;
    /*returns the optimized focal length (the last of the _m parameters)*/
    void getFocal(T& f) const;
    bool isFailed() const { return !_succeeded; }
    void setVerbose(bool verbose) { _verbose = verbose; }
  protected:
    T _p[2]; //principal point (image center), taken from K[2] and K[5]
    T _c[_m]; /*6 parameters to model the camera pose [R|t], additional 1 parameter to model the focal length*/
    int _n; /*number of 2D<->3D correspondences*/
    const T *_ps; /*borrowed pointer to _n world points, 3 values each*/
    const T *_xs; /*borrowed pointer to _n image points, 2 values each*/
    bool _succeeded; /*outcome of the last optimize() call*/
    bool _verbose; /*forwarded to the LM solver*/
  };

  template<typename T>
  UnCalibratedCameraFocalPoseOptimizer<T>::UnCalibratedCameraFocalPoseOptimizer() : _n(0), _ps(NULL), _xs(NULL), _succeeded(false), _verbose(false)
  {
    /*start from zeroed principal point and parameter vector; optimize() fills them in*/
    i_zero2(_p);
    i_zero(_c, _m);
  }

  template<typename T>
  UnCalibratedCameraFocalPoseOptimizer<T>::~UnCalibratedCameraFocalPoseOptimizer(){} /*nothing owned: _ps/_xs are borrowed pointers*/

  template<typename T>
  void UnCalibratedCameraFocalPoseOptimizer<T>::getPose(T P[12]) const
  {
    /*convert the first 6 internal parameters (Rodrigues rotation + translation) back into a 3x4 [R|t]*/
    T rotation[9];
    T translation[3];
    i_rot_rodrigues_3x3(_c, rotation);
    i_copy3(_c + 3, translation);
    i_compose_normalized_P_matrix_from_R_t(rotation, translation, P);
  }

  template<typename T>
  void UnCalibratedCameraFocalPoseOptimizer<T>::getFocal(T& focal) const
  {
    /*the focal length is stored as the last of the _m optimization parameters*/
    focal = _c[_m - 1];
  }

  /*Runs LM refinement of pose and focal length from the initial pose P and
  intrinsics K over the n correspondences xs (2 values each) <-> ps (3 values each).
  Returns true on success; false on invalid input or allocation failure.*/
  template<typename T>
  bool UnCalibratedCameraFocalPoseOptimizer<T>::optimize(const T K[9]/*(K0+K4)/2 is the initial focal length*/, const T P[12]/*initial normalized P matrix [R|t]*/, const T *xs, const T *ps, int n)
  {
    T R[9], t[3], theta;
    _succeeded = false;
    /*reject non-positive counts as well: a negative n would otherwise reach i_alloc with a negative size*/
    if (n <= 0 || xs == NULL || ps == NULL)
    {
      return false;
    }
    /*alloc error vector fvec*/
    T* fvec = i_alloc<T>(n*_d);
    if (fvec == NULL)
    {
      return false;
    }
    _n = n;
    _xs = xs;
    _ps = ps;
    /*initialize camera parameters: principal point from K, 6-parameter pose from P*/
    _p[0] = K[2];
    _p[1] = K[5];

    i_decompose_R_t_from_normalized_P_matrix(P, R, t);
    i_rot_invert_rodrigues_3x3(R, _c, theta);
    i_copy3(t, _c + 3);
    _c[_m - 1] = i_average(K[0], K[4]); /*(K0+K4)/2 is the initial focal length*/

    LevenbergMarquardt solver;
    solver.setVerbose(_verbose);
    _succeeded = solver.optimize(*this, _c, fvec);
    i_free(fvec);
    return _succeeded;
  }

  template<typename T>
  void UnCalibratedCameraFocalPoseOptimizer<T>::computeCost(const T *c, T* fvec) const
  {
    /*evaluate the _d*_n reprojection residuals at parameter vector c,
    using the current focal length estimate c[_m-1]*/
    T R[9], t[3];
    i_rot_rodrigues_3x3(c, R);
    i_copy3(c + 3, t);
    const T focal = c[_m - 1];
    for (int i = 0; i < _n; ++i)
    {
      /*image points have 2 values each, world points 3, residuals _d*/
      i_reproject_vec_error_inhomogeneous(_xs + 2 * i, focal, _p[0], _p[1], R, t, _ps + 3 * i, fvec + _d * i);
    }
  }

  template<typename T>
  void UnCalibratedCameraFocalPoseOptimizer<T>::computeJacobian(const T *c, T* J) const
  {
    /*J is 2*_n x 7*/
    /*each row pair is the quotient-rule derivative of the projection
    (f*P1/P3 + px, f*P2/P3 + py) wrt the 6 pose parameters and the focal length f*/
    int i, j_offset = _m * _d;
    T R[9], t[3], focal, focal_rec, Rx[3], P[3], D[9][3];
    T P1, P2, P3, P1_dc[7], P2_dc[7], P3_dc[7];
    T X0, X1, X2, P3P3_rec, sf;

    const T *cptr_X = _ps;
    T* ptr_J = J;
    /*D holds the derivatives of the rotation matrix entries wrt the 3 Rodrigues parameters*/
    i_rot_rodrigues_3x3(c, R, D);
    i_copy3(c + 3, t);
    focal = c[_m - 1];
    focal_rec = i_rec(focal);

    /*translation derivatives are constant over the loop; set them once*/
    P1_dc[3] = (T)1.0;// focal;  /*derivative of t1*/
    P1_dc[4] = (T)0.0; /*derivative of t2*/
    P1_dc[5] = (T)0.0; /*derivative of t3*/

    P2_dc[3] = (T)0.0; /*derivative of t1*/
    P2_dc[4] = (T)1.0; //focal;  /*derivative of t2*/
    P2_dc[5] = (T)0.0; /*derivative of t3*/

    P3_dc[3] = (T)0.0; /*derivative of t1*/
    P3_dc[4] = (T)0.0; /*derivative of t2*/
    P3_dc[5] = (T)1.0; /*derivative of t3*/
    P3_dc[6] = (T)0.0; /*derivative of focal*/

    for (i = 0; i < _n; ++i, cptr_X += 3, ptr_J += j_offset)
    {
      /*P = R*X + t, the world point in the camera frame*/
      i_mult_Ax_3x3(R, cptr_X, Rx);
      i_add3(Rx, t, P);
      /*precompute variables*/
      X0 = cptr_X[0];
      X1 = cptr_X[1];
      X2 = cptr_X[2];
      P1 = P[0];// *focal;
      P2 = P[1];// *focal;
      P3 = P[2];
      P3P3_rec = i_rec(i_sqr(P3));
      sf = focal*P3P3_rec;
      /*compute derivatives wrt v1, v2, v3, t1, t2, t3, and f*/
      P1_dc[0] = (D[0][0] * X0 + D[1][0] * X1 + D[2][0] * X2); //*focal; /*derivative of v1*/
      P1_dc[1] = (D[0][1] * X0 + D[1][1] * X1 + D[2][1] * X2); //*focal; /*derivative of v2*/
      P1_dc[2] = (D[0][2] * X0 + D[1][2] * X1 + D[2][2] * X2); //*focal; /*derivative of v3*/
      P1_dc[6] = P[0] * focal_rec;/*derivative of focal*/

      P2_dc[0] = (D[3][0] * X0 + D[4][0] * X1 + D[5][0] * X2); /*derivative of v1*/
      P2_dc[1] = (D[3][1] * X0 + D[4][1] * X1 + D[5][1] * X2); /*derivative of v2*/
      P2_dc[2] = (D[3][2] * X0 + D[4][2] * X1 + D[5][2] * X2); /*derivative of v3*/
      P2_dc[6] = P[1] * focal_rec; /*derivative of focal*/

      P3_dc[0] = D[6][0] * X0 + D[7][0] * X1 + D[8][0] * X2; /*derivative of v1*/
      P3_dc[1] = D[6][1] * X0 + D[7][1] * X1 + D[8][1] * X2; /*derivative of v2*/
      P3_dc[2] = D[6][2] * X0 + D[7][2] * X1 + D[8][2] * X2; /*derivative of v3*/
    
      /*dx/dc; the t1/t2/t3 entries use the simplified forms since the
      corresponding P?_dc values are the constants 0/1 set above*/
      ptr_J[0] = (P1_dc[0] * P3 - P1 * P3_dc[0])*sf;
      ptr_J[1] = (P1_dc[1] * P3 - P1 * P3_dc[1])*sf;
      ptr_J[2] = (P1_dc[2] * P3 - P1 * P3_dc[2])*sf;
      ptr_J[3] = P3*sf;
      ptr_J[4] = (T)0.0;
      ptr_J[5] = (          -P1                )*sf;
      ptr_J[6] = (P1_dc[6] * P3)*sf;
      
      /*dy/dc*/
      ptr_J[7] = (P2_dc[0] * P3 - P2 * P3_dc[0])*sf;
      ptr_J[8] = (P2_dc[1] * P3 - P2 * P3_dc[1])*sf;
      ptr_J[9] = (P2_dc[2] * P3 - P2 * P3_dc[2])*sf;
      ptr_J[10] = (T)0.0;
      ptr_J[11] = P3*sf;
      ptr_J[12] = (         - P2                )*sf;
      ptr_J[13] = (P2_dc[6] * P3)*sf;
    }
  }
  //////////////////////////////////////////////////////////////////////////////////////////////////
} /* namespace idl */