#pragma once

#include "../numeric/i_nullspace.h"
#include "../optimize/i_lm.h"
#include "i_util.h"

namespace idl
{
  /*Linear (DLT) triangulation of the homogeneous point X from the correspondence x <-> xp
    observed by the 3x4 (row-major) cameras P1 and P2. This is a purely algebraic estimate:
    X does not exactly satisfy the geometric relations and is not statistically optimal.
    The input points are assumed to have been pre-normalized for numerical stability.
    null_space_method selects how the null space of the 3x4 system is computed:
      0 - closed-form (generalized cross product of the three 4-vectors)
      1 - LU factorization
      2 - SVD
      any other value - closed-form
    If normalize is true the output vector is divided by its last element.*/
  template <typename T> inline void i_triangulate_dlt(const T x[3], const T xp[3], const T P1[12], const T P2[12], T X[4], int null_space_method = 0, bool normalize = true)
  {
    /*Three equations in four homogeneous unknowns: a redundant system whose solution is
      determined only up to scale, so a 3x4 coefficient matrix suffices (no 4x4 needed).*/
    T eq[12];
    const T u = x[0];
    const T v = x[1];
    const T w = xp[0];
    /*row 0: u * P1(row 2) - P1(row 0)*/
    eq[0] = u * P1[8] - P1[0];
    eq[1] = u * P1[9] - P1[1];
    eq[2] = u * P1[10] - P1[2];
    eq[3] = u * P1[11] - P1[3];
    /*row 1: v * P1(row 2) - P1(row 1)*/
    eq[4] = v * P1[8] - P1[4];
    eq[5] = v * P1[9] - P1[5];
    eq[6] = v * P1[10] - P1[6];
    eq[7] = v * P1[11] - P1[7];
    /*row 2: w * P2(row 2) - P2(row 0)*/
    eq[8] = w * P2[8] - P2[0];
    eq[9] = w * P2[9] - P2[1];
    eq[10] = w * P2[10] - P2[2];
    eq[11] = w * P2[11] - P2[3];
    /*LU and the closed-form solution tend to be more stable than the SVD here*/
    if (null_space_method == 1)
    {
      i_nullspace_lu_3x4_destroy(eq, X);
    }
    else if (null_space_method == 2)
    {
      i_nullspace_svd_3x4(eq, X);
    }
    else
    {
      /*0 and all other values: closed-form null vector of the three rows*/
      i_cross(eq, eq + 4, eq + 8, X);
    }
    if (normalize)
    {
      i_homogeneous_unitize4(X);
    }
  }

  /*Linear (DLT) triangulation of the homogeneous point X from the correspondence x <-> xp,
    specialized to a canonical first camera P1 = [I|0] (so only P2 is needed). This is a purely
    algebraic estimate: X does not exactly satisfy the geometric relations and is not optimal.
    The input points are assumed to have been pre-normalized for numerical stability.
    null_space_method selects how the null space of the 3x4 system is computed:
      0 - closed-form (generalized cross product of the three 4-vectors)
      1 - LU factorization
      2 - SVD
      any other value - closed-form
    If normalize is true the output vector is divided by its last element.*/
  template <typename T> inline void i_triangulate_dlt_canonical(const T x[3], const T xp[3], const T P2[12], T X[4], int null_space_method = 0, bool normalize = true)
  {
    /*Redundant 3x4 system; the solution is determined only up to scale.*/
    T eq[12];
    const T u = x[0];
    const T v = x[1];
    const T w = xp[0];
    /*rows 0 and 1 follow directly from P1 = [I|0]*/
    eq[0] = -(T)1.0;  eq[1] =  (T)0.0;   eq[2] = u;   eq[3] = (T)0.0;
    eq[4] =  (T)0.0;  eq[5] = -(T)1.0;   eq[6] = v;   eq[7] = (T)0.0;
    /*row 2: w * P2(row 2) - P2(row 0)*/
    eq[8] = w * P2[8] - P2[0];
    eq[9] = w * P2[9] - P2[1];
    eq[10] = w * P2[10] - P2[2];
    eq[11] = w * P2[11] - P2[3];
    /*LU and the closed-form solution tend to be more stable than the SVD here*/
    if (null_space_method == 1)
    {
      i_nullspace_lu_3x4_destroy(eq, X);
    }
    else if (null_space_method == 2)
    {
      i_nullspace_svd_3x4(eq, X);
    }
    else
    {
      /*0 and all other values: closed-form null vector of the three rows*/
      i_cross(eq, eq + 4, eq + 8, X);
    }
    if (normalize)
    {
      i_homogeneous_unitize4(X);
    }
  }

  /*Triangulates the homogeneous space point X from the correspondence x <-> xp observed in two
    views with camera matrices P1, P2 and fundamental matrix F (such that xp'*F*x = 0).
    It intersects three planes backprojected from image lines:
      1. the epipolar line defined by the first image point,
      2. the line through the second image point perpendicular to that epipolar line,
      3. the line through the first image point perpendicular to the epipolar line of xp.
    This places the world point on the ray backprojected from the first image point while
    minimizing the reprojection error in the second image. It triangulates points at infinity
    correctly and is invariant to projective transformations of world space.
    If normalize is true the output vector is divided by its last element.*/
  template <typename T> inline void i_triangulate_second_view_error(const T x[3], const T xp[3], const T P1[12], const T P2[12], const T F[9], T X[4], bool normalize = true)
  {
    T epi2[3];          /*full epipolar line F*x in the second image*/
    T dir1[3], dir2[3]; /*epipolar lines truncated to ideal points (perpendicular directions)*/
    T perp1[3], perp2[3];
    T plane1[4], plane2[4], plane3[4];

    /*Epipolar line F'*xp in the first image; zeroing the last coordinate turns it into the
      point at infinity in the direction perpendicular to that line (its first two coords).*/
    i_mult_Atx_3x3(F, xp, dir1);
    dir1[2] = (T)0.0;
    /*The cross product of that ideal point with x is the image line through x that is
      perpendicular to the epipolar line.*/
    i_cross(dir1, x, perp1);

    /*Same construction in the second image: keep a copy of the full epipolar line F*x,
      then truncate it to obtain the perpendicular direction.*/
    i_mult_Ax_3x3(F, x, dir2);
    i_copy3(dir2, epi2);
    dir2[2] = (T)0.0;
    i_cross(dir2, xp, perp2);

    /*Backproject the three lines to planes: the set of points mapping to a line l under camera
      P is the plane P^t*l - see p. 197 of Multiple View Geometry in Computer Vision, 2nd ed.*/
    i_mult_Atx_4x3(P1, perp1, plane1);
    i_mult_Atx_4x3(P2, perp2, plane2);
    i_mult_Atx_4x3(P2, epi2, plane3);
    /*The intersection of the three planes is their common null vector => the point X.*/
    i_cross(plane1, plane2, plane3, X);
    /*Normalize the coordinate vector if desired.*/
    if (normalize)
    {
      i_homogeneous_unitize4(X);
    }
  }

  /*Triangulates the homogeneous space point X from the correspondence x <-> xp observed in two
    views with fundamental matrix F (such that xp'*F*x = 0), assuming the first camera is
    canonical (P1 = [I|0]) so only P2 is needed.
    It intersects three planes backprojected from image lines:
      1. the epipolar line defined by the first image point,
      2. the line through the second image point perpendicular to that epipolar line,
      3. the line through the first image point perpendicular to the epipolar line of xp.
    This places the world point on the ray backprojected from the first image point while
    minimizing the reprojection error in the second image. It triangulates points at infinity
    correctly and is invariant to projective transformations of world space.
    If normalize is true the output vector is divided by its last element.*/
  template <typename T> inline void i_triangulate_second_view_error_canonical(const T x[3], const T xp[3], const T P2[12], const T F[9], T X[4], bool normalize = true)
  {
    T epi2[3];          /*full epipolar line F*x in the second image*/
    T dir1[3], dir2[3]; /*epipolar lines truncated to ideal points (perpendicular directions)*/
    T perp1[3], perp2[3];
    T plane1[4], plane2[4], plane3[4];

    /*Epipolar line F'*xp in the first image; zeroing the last coordinate turns it into the
      point at infinity in the direction perpendicular to that line (its first two coords).*/
    i_mult_Atx_3x3(F, xp, dir1);
    dir1[2] = (T)0.0;
    /*The cross product of that ideal point with x is the image line through x that is
      perpendicular to the epipolar line.*/
    i_cross(dir1, x, perp1);

    /*Same construction in the second image: keep a copy of the full epipolar line F*x,
      then truncate it to obtain the perpendicular direction.*/
    i_mult_Ax_3x3(F, x, dir2);
    i_copy3(dir2, epi2);
    dir2[2] = (T)0.0;
    i_cross(dir2, xp, perp2);

    /*Backproject the three lines to planes: the set of points mapping to a line l under camera
      P is the plane P^t*l - see p. 197 of Multiple View Geometry in Computer Vision, 2nd ed.
      With P1 = [I|0], the backprojection P1^t*l is simply [l; 0].*/
    plane1[3] = (T)0;
    i_copy3(perp1, plane1);
    i_mult_Atx_4x3(P2, perp2, plane2);
    i_mult_Atx_4x3(P2, epi2, plane3);
    /*The intersection of the three planes is their common null vector => the point X.*/
    i_cross(plane1, plane2, plane3, X);
    /*Normalize the coordinate vector if desired.*/
    if (normalize)
    {
      i_homogeneous_unitize4(X);
    }
  }

  /*This routine computes the "Confidence Ellipsoids" of a triangulated 3D scene point X.
  The two cameras are assumed to be canonical: image coordinates x and xp are in the normalized
  space (K^-1 has been applied), X, x, xp are all in homogeneous coordinates, and P1, P2 have the
  form P = [R|t] (3x4, row-major, 12 elements each).
  The confidence is approximated by the square root of the quotient of the smallest and the
  largest singular values of the covariance matrix; the measure lies between 0 and 1, is invariant
  to scale changes and only depends on the relative geometry of the two camera poses.
  Returns 0 if the singular values do not all share one sign (degenerate covariance).*/
  template <typename T>
  inline T i_triangulation_confidence_canonical(const T X[4], const T x[3], const T xp[3], const T P1[12], const T P2[12])
  {
    /*note: P2 was previously declared as P2[3], but it is consumed as a full 3x4 camera matrix
      (12 elements) below, matching P1 - the declared extent is corrected here*/
    T B1[6], B2[6]/*2x3*/, B1B1t[4], B2B2t[4]/*2x2*/, C[9]/*3x3*/, A[16], Ai[16], Ait[16], Cxx[16]/*4x4*/, tmp[16]/*4x4*/, Wi[16]/*4x4 --> (W^-1)*/, WiXXtWi[16], XXt[16], PX[3], J[12]/*3x4*/, w[3]/*singular values*/, sf, sf2;
    T confidence = (T)0;

    /*construct the 4x4 A matrix from cross-product-style matrices of x and xp, using B1/B2 as
      temporary memory (they are rebuilt below)*/
    B1[0] = (T)0; B1[1] = -x[2];   B1[2] =  x[1];
    B1[3] = x[2]; B1[4] = (T)0;    B1[5] = -x[0];
    B2[0] = (T)0;  B2[1] = -xp[2]; B2[2] =  xp[1];
    B2[3] = xp[2]; B2[4] = (T)0;   B2[5] = -xp[0];

    i_mult_AB_2x3_3x4(B1, P1, A);
    i_mult_AB_2x3_3x4(B2, P2, A + 8);

    /*construct B1, B2 from the reprojections P*X (only B*B^t is used below, so the sign
      convention relative to the block above is irrelevant)*/
    i_mult_Ax_3x4(P1, X, PX);
    B1[0] = (T)0;  B1[1] = PX[2]; B1[2] = -PX[1];
    B1[3] = -PX[2]; B1[4] = (T)0; B1[5] = PX[0];

    i_mult_Ax_3x4(P2, X, PX);
    B2[0] = (T)0;  B2[1] = PX[2]; B2[2] = -PX[1];
    B2[3] = -PX[2]; B2[4] = (T)0;  B2[5] = PX[0];

    /*construct the block-diagonal G matrix: G = diag(B1*B1^t, B2*B2^t)*/
    i_mult_AAt_2x3(B1, B1B1t);
    i_mult_AAt_2x3(B2, B2B2t);
    /*W = A^t(G^-1)A  =>  W^-1 = (A^-1)G(A^-1)^t*/

    /*compute A^-1 and (A^-1)^t*/
    i_lu_invert_4x4(A, Ai);
    i_transpose_4x4(Ai, Ait);

    /*compute (W^-1) = (A^-1)G(A^-1)^t, exploiting the block-diagonal structure of G*/
    i_mult_AB_4x4_w_A_block_diagonal(B1B1t, B2B2t, Ait, tmp);
    i_mult_AB_4x4_4x4(Ai, tmp, Wi);

    /*compute the outer product XXt = X*X^t*/
    i_mult_AAt_4x1(X, XXt);

    /*compute (W^-1)XXt(W^-1)*/
    i_mult_AB_4x4_4x4(XXt, Wi, tmp);
    i_mult_AB_4x4_4x4(Wi, tmp, WiXXtWi);

    /*compute scale-factor sf = -1 / (X^t (W^-1) X); only tmp[0..3] are written here*/
    i_mult_Ax_4x4(Wi, X, tmp);
    sf = -i_rec(i_dot4(X, tmp));

    /*compute the homogeneous 4x4 covariance Cxx = W^-1 + sf*(W^-1)XXt(W^-1)*/
    i_scale16(WiXXtWi, sf);
    i_add16(Wi, WiXXtWi, Cxx);

    /*compute J: Jacobian of the homogeneous -> inhomogeneous mapping X -> X[0..2]/X[3]*/
    sf = i_rec(X[3]);
    sf2 = sf*sf;
    J[0] = sf;  J[1] = (T)0; J[2] = (T)0; J[3] = -(X[0] * sf2);
    J[4] = (T)0;  J[5] = sf; J[6] = (T)0; J[7] = -(X[1] * sf2);
    J[8] = (T)0;  J[9] = (T)0; J[10] = sf; J[11] = -(X[2] * sf2);

    /*compute the 3x3 covariance C = J*Cxx*J^t of the inhomogeneous point*/
    i_mult_ABt_4x4_3x4(Cxx, J, tmp);
    i_mult_AB_3x4_4x3(J, tmp, C);

    /*singular values; presumably returned in descending order (w[0] largest) - confirm
      against i_singular_values_3x3*/
    i_singular_values_3x3(C, w);
    if ((w[0] < 0 && w[2] < 0) || (w[0] > 0 && w[2] > 0))
    {
      confidence = i_sqrt(i_div(w[2], w[0]));
    }

    return confidence;
  }

  /*This routine computes the "Confidence Ellipsoids" of a triangulated 3D scene point X.
    The two cameras are assumed to be non-canonical: image coordinates x and xp are in the
    un-normalized image space (K^-1 has NOT been applied), so the intrinsics K1, K2 are inverted
    and applied here first. X, x, xp are all in homogeneous coordinates.
    The confidence is approximated by the square root of the quotient of the smallest and the
    largest singular values of the covariance matrix; the measure lies between 0 and 1, is
    invariant to scale changes and only depends on the relative geometry of the two camera poses.
    Returns 0 if the singular values do not all share one sign (degenerate covariance).*/
  template <typename T> 
  inline T i_triangulation_confidence(const T X[4], const T x[3], const T xp[3],
                                      const T K1[9], const T R1[9], const T t1[3],
                                      const T K2[9], const T R2[9], const T t2[3])
  {
    T K1inv[9], K2inv[9], B1[6], B2[6]/*2x3*/, P1[12], P2[12]/*3x4*/, B1B1t[4], B2B2t[4]/*2x2*/, C[9]/*3x3*/, A[16], Ai[16], Ait[16], Cxx[16]/*4x4*/, tmp[16]/*4x4*/, Wi[16]/*4x4 --> (W^-1)*/, WiXXtWi[16], XXt[16], PX[3], J[12]/*3x4*/, w[3]/*singular values*/, sf, sf2;
    T nx[3], nxp[3]; /*normalized image coordinates - K^-1 applied below*/

    /*normalize the observations: nx = K1^-1 * x, nxp = K2^-1 * xp*/
    i_invert_3x3(K1, K1inv);
    i_invert_3x3(K2, K2inv);
    
    i_mult_Ax_3x3(K1inv,  x,  nx);
    i_mult_Ax_3x3(K2inv, xp, nxp);
   
    /*build the normalized camera matrices P = [R|t]*/
    i_compose_normalized_P_matrix_from_R_t(R1, t1, P1);
    i_compose_normalized_P_matrix_from_R_t(R2, t2, P2);

    /*construct the 4x4 A matrix from cross-product-style matrices of nx and nxp, using B1/B2 as
      temporary memory (they are rebuilt below)*/
    B1[0] = (T)0; B1[1] = -nx[2];   B1[2] = nx[1];
    B1[3] = nx[2]; B1[4] = (T)0;    B1[5] = -nx[0];
    B2[0] = (T)0;  B2[1] = -nxp[2]; B2[2] = nxp[1];  
    B2[3] = nxp[2]; B2[4] = (T)0;   B2[5] = -nxp[0]; 

    i_mult_AB_2x3_3x4(B1, P1,   A);
    i_mult_AB_2x3_3x4(B2, P2, A+8);
    
    /*construct B1, B2 from the reprojections P*X (only B*B^t is used below, so the sign
      convention relative to the block above is irrelevant)*/
    i_mult_Ax_3x4(P1, X, PX);
    B1[0] = (T)0;  B1[1] = PX[2]; B1[2] = -PX[1];
    B1[3] = -PX[2]; B1[4] = (T)0; B1[5] =  PX[0];
    i_mult_Ax_3x4(P2, X, PX);
    B2[0] = (T)0;  B2[1] = PX[2]; B2[2] = -PX[1];
    B2[3] = -PX[2]; B2[4] = (T)0;  B2[5] = PX[0];

    /*construct the block-diagonal G matrix: G = diag(B1*B1^t, B2*B2^t)*/
    i_mult_AAt_2x3(B1, B1B1t);
    i_mult_AAt_2x3(B2, B2B2t);

    /*W = A^t(G^-1)A  =>  W^-1 = (A^-1)G(A^-1)^t*/

    /*compute A^-1 and (A^-1)^t*/
    i_lu_invert_4x4(A, Ai);
    i_transpose_4x4(Ai, Ait);

    /*compute W^-1 = (A^-1)G(A^-1)^t, exploiting the block-diagonal structure of G*/
    i_mult_AB_4x4_w_A_block_diagonal(B1B1t, B2B2t, Ait, tmp);
    i_mult_AB_4x4_4x4(Ai, tmp, Wi);

    /*compute the outer product XXt = X*X^t*/
    i_mult_AAt_4x1(X, XXt);
    
    /*compute (W^-1)XXt(W^-1)*/
    i_mult_AB_4x4_4x4(XXt, Wi, tmp);
    i_mult_AB_4x4_4x4(Wi, tmp, WiXXtWi);

    /*compute scale-factor sf = -1 / (X^t (W^-1) X); only tmp[0..3] are written here*/
    i_mult_Ax_4x4(Wi, X, tmp);
    sf = -i_rec(i_dot4(X, tmp));

    /*compute the homogeneous 4x4 covariance Cxx = W^-1 + sf*(W^-1)XXt(W^-1)*/
    i_scale16(WiXXtWi, sf);
    i_add16(Wi, WiXXtWi, Cxx);

    /*compute J: Jacobian of the homogeneous -> inhomogeneous mapping X -> X[0..2]/X[3]*/
    sf = i_rec(X[3]);
    sf2 = sf*sf;
    J[0] =   sf;  J[1] = (T)0; J[2] = (T)0; J[ 3] = -(X[0] * sf2);
    J[4] = (T)0;  J[5] =   sf; J[6] = (T)0; J[ 7] = -(X[1] * sf2);
    J[8] = (T)0;  J[9] = (T)0; J[10] =  sf; J[11] = -(X[2] * sf2);

    /*compute the 3x3 covariance C = J*Cxx*J^t of the inhomogeneous point*/
    i_mult_ABt_4x4_3x4(Cxx, J, tmp);
    i_mult_AB_3x4_4x3(J, tmp, C);

    /*singular values; presumably returned in descending order (w[0] largest) - confirm
      against i_singular_values_3x3*/
    i_singular_values_3x3(C, w);

    T confidence = (T)0;
    
    if ((w[0] < 0 && w[2] < 0) || (w[0] > 0 && w[2] > 0))
    {
      confidence = i_sqrt(i_div(w[2], w[0]));
    }

    return confidence;
  }

  /*Nonlinear refinement of a two-view triangulation: models a 3D point with 4 homogeneous
    parameters and exposes the cost/Jacobian interface consumed by the LevenbergMarquardt
    solver (see optimize()), minimizing the point's reprojection error in both views.
    Typical usage: optimize(...) then getPoint(...).*/
  template<typename T>
  class PairviewTriangulationOptimizer
  {
  public:
    typedef T data_type;
    static const int _m = 4; /*number of unknowns: the homogeneous 3D point*/
    static const int _d = 2; /*residual dimension per view*/
    PairviewTriangulationOptimizer();
    ~PairviewTriangulationOptimizer();
    /*residual vector (2 entries per view, 4 total) for candidate point X*/
    void computeCost(const T X[4], T *fvec) const;
    /*4x4 Jacobian of the residuals with respect to X*/
    void computeJacobian(const T X[4], T *J) const;
    int getNumUnknowns() const { return _m; }
    int getNumberResiduals() const { return _d*2; };
    /*runs the solver; X is the initial guess, P1/P2 the 3x4 cameras, x1/x2 the observations*/
    bool optimize(const T X[4], const T P1[12], const T P2[12], const T x1[2], const T x2[2]);
    /*copies out the refined point; divides by the last element if normalize is true*/
    void getPoint(T X[4], bool normalize = true) const;
    void setVerbose(bool verbose) { _verbose = verbose; }
    bool isFailed() const { return !_succeeded; }
  protected:
    T _X3d[_m]; /*4 parameters to model the 3D point in homogeneous coordinate system*/
    T _P1[12], _P2[12]; /*fixed camera matrices (3x4, row-major)*/
    T _x1[2], _x2[2];   /*fixed image observations*/
    bool _succeeded;    /*outcome of the last optimize() call*/
    bool _verbose;      /*forwarded to the solver*/
  };

  /*Constructs the optimizer in a cleared, not-yet-succeeded state.*/
  template<typename T>
  PairviewTriangulationOptimizer<T>::PairviewTriangulationOptimizer() : _succeeded(false), _verbose(false)
  {
    /*zero the observations and the point estimate; cameras are set in optimize()*/
    i_zero2(_x1);
    i_zero2(_x2);
    i_zero(_X3d, _m);
  }

  /*Trivial destructor: all members are plain arrays and bools, nothing to release.*/
  template<typename T>
  PairviewTriangulationOptimizer<T>::~PairviewTriangulationOptimizer(){}

  /*Refines the homogeneous 3D point via Levenberg-Marquardt so that its reprojection error in
    both views is minimized. X is the initial guess; P1/P2 are the 3x4 cameras and x1/x2 the
    observed image points. The refined point is retrieved with getPoint().
    Returns true on solver success (also queryable via isFailed()); returns false without
    running the solver when any input pointer is NULL.*/
  template<typename T>
  bool PairviewTriangulationOptimizer<T>::optimize(const T X[4], const T P1[12], const T P2[12], const T x1[2], const T x2[2])
  {
    _succeeded = false;
    /*reject null inputs - including X, which the original check omitted even though it is
      dereferenced by i_copy4 below*/
    if (X == NULL || P1 == NULL || P2 == NULL || x1 == NULL || x2 == NULL)
    {
      return false;
    }
    T fvec[4]; /*residual workspace: two reprojection errors per view*/
    /*cache the fixed problem data*/
    i_copy12(P1, _P1);
    i_copy12(P2, _P2);
    i_copy2(x1,  _x1);
    i_copy2(x2,  _x2);
    /*set initial guess for 3D point*/
    i_copy4(X, _X3d);
    LevenbergMarquardt solver;
    solver.setVerbose(_verbose);
    _succeeded = solver.optimize(*this, _X3d, fvec);
    return _succeeded;
  }

  /*Copies the current homogeneous 3D point estimate into X; when normalize is true the
    vector is additionally divided by its last element.*/
  template<typename T>
  void PairviewTriangulationOptimizer<T>::getPoint(T X[4], bool normalize) const
  {
    i_copy4(_X3d, X);
    if (!normalize)
    {
      return;
    }
    i_homogeneous_unitize4(X);
  }

  /*Evaluates the 4-vector of reprojection residuals for candidate point X:
    first view in fvec[0..1], second view in fvec[2..3].*/
  template<typename T>
  void PairviewTriangulationOptimizer<T>::computeCost(const T X[4], T *fvec) const
  {
    /*first view*/
    i_reproject_vec_error_homogeneous(_x1, _P1, X, &fvec[0]);
    /*second view*/
    i_reproject_vec_error_homogeneous(_x2, _P2, X, &fvec[2]);
  }

  /*Fills J (4x4, row-major) with the derivatives of the projected image coordinates with
    respect to the homogeneous point X: rows 0-1 for the first camera, rows 2-3 for the second.
    For u = (p_0 . X)/(p_2 . X) the derivative w.r.t. X[c] is
      (p_0[c]*(p_2 . X) - (p_0 . X)*p_2[c]) / (p_2 . X)^2,
    and analogously for v with the camera's second row p_1.*/
  template<typename T>
  void PairviewTriangulationOptimizer<T>::computeJacobian(const T X[4], T *J) const
  {
    const T *cams[2] = { _P1, _P2 };
    for (int view = 0; view < 2; ++view)
    {
      const T *cam = cams[view];
      const T num_u = i_dot4(cam,     X); /*first camera row dot X*/
      const T num_v = i_dot4(cam + 4, X); /*second camera row dot X*/
      const T den   = i_dot4(cam + 8, X); /*third camera row dot X*/
      const T den_sqr = i_sqr(den);
      T *row_u = J + 8 * view; /*derivatives of u for this view*/
      T *row_v = row_u + 4;    /*derivatives of v for this view*/
      for (int c = 0; c < 4; ++c)
      {
        row_u[c] = i_div(cam[c]     * den - num_u * cam[8 + c], den_sqr);
        row_v[c] = i_div(cam[4 + c] * den - num_v * cam[8 + c], den_sqr);
      }
    }
  }

}/* namespace idl */