#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 30 17:04:07 2024

@author: lixiao
"""
import numpy as np
from scipy.spatial.transform import Rotation

from computeFundamental import fundamentalEightPointNormalized
from computeFundamental import fundamentalRANSAC

from triangulation import linearTriangulation


def disambiguateRelativePose(Rots, u3, points0_h, points1_h, K1, K2):
    """DISAMBIGUATERELATIVEPOSE- finds the correct relative camera pose (among
    four possible configurations) by returning the one that yields the most
    points lying in front of both image planes (with positive depth).

    Arguments:
      Rots      - 3x3x2: the two possible rotations returned by decomposeEssentialMatrix
      u3        - a 3x1 vector with the translation information returned by decomposeEssentialMatrix
      points0_h - 3xN homogeneous coordinates of point correspondences in image 1
      points1_h - 3xN homogeneous coordinates of point correspondences in image 2
      K1        - 3x3 calibration matrix for camera 1
      K2        - 3x3 calibration matrix for camera 2

    Returns:
      R -  3x3 the correct rotation matrix
      T -  3x1 the correct translation vector

      where [R|t] = T_C2_C1 = T_C2_W is a transformation that maps points
      from the world coordinate system (identical to the coordinate system of camera 1)
      to camera 2.
    """

    # Projection matrix of camera 1 (world frame coincides with camera 1)
    M1 = K1 @ np.eye(3, 4)

    # Seed with the first candidate and a count below any achievable value so
    # the loop always selects something; previously R/T could remain unbound
    # (UnboundLocalError) when no configuration had a point with positive depth.
    R = Rots[:, :, 0]
    T = u3
    total_points_in_front_best = -1
    for iRot in range(2):
        R_C2_C1_test = Rots[:, :, iRot]

        for iSignT in range(2):
            # Try both signs of the translation direction
            T_C2_C1_test = u3 * (-1) ** iSignT

            M2 = K2 @ np.c_[R_C2_C1_test, T_C2_C1_test]
            P_C1 = linearTriangulation(points0_h, points1_h, M1, M2)

            # Express the triangulated points in the camera-2 frame as well
            P_C2 = np.c_[R_C2_C1_test, T_C2_C1_test] @ P_C1

            # Count points with positive depth (z > 0) in each camera
            num_points_in_front1 = np.sum(P_C1[2, :] > 0)
            num_points_in_front2 = np.sum(P_C2[2, :] > 0)
            total_points_in_front = num_points_in_front1 + num_points_in_front2

            if total_points_in_front > total_points_in_front_best:
                # Keep the configuration that gives the highest number of
                # points in front of both cameras
                R = R_C2_C1_test
                T = T_C2_C1_test
                total_points_in_front_best = total_points_in_front

    return R, T


def estimateEssentialMatrix(p1, p2, K1, K2):
    """estimates the essential matrix given matching point coordinates,
       and the camera calibrations

    Input: point correspondences
     - p1 np.ndarray(3,N): homogeneous coordinates of 2-D points in image 1
     - p2 np.ndarray(3,N): homogeneous coordinates of 2-D points in image 2
     - K1 np.ndarray(3,3): calibration matrix of camera 1
     - K2 np.ndarray(3,3): calibration matrix of camera 2

    Output:
     - E np.ndarray(3,3) : essential matrix
    """
    # Robustly estimate the fundamental matrix from the raw correspondences
    F = fundamentalRANSAC(p1, p2)
    # F = fundamentalEightPointNormalized(p1, p2);
    # Compute the essential matrix from the fundamental matrix given K:
    # E = K2^T F K1 (Hartley & Zisserman, eq. 9.12)
    E = K2.T @ F @ K1
    return E


def decomposeEssentialMatrix(E):
    """Decompose an essential matrix into its two candidate rotations and the
    translation direction, such that E ~ [u3]_x R.

    Input:
      - E(3,3) : Essential matrix

    Output:
      - R(3,3,2) : the two possible rotations
      - u3(3,)   : unit vector with the translation information (up to sign)
    """

    U, _, Vt = np.linalg.svd(E)

    # Translation direction: last left singular vector, normalized when nonzero
    u3 = U[:, 2]
    u3_norm = np.linalg.norm(u3)
    if u3_norm != 0:
        u3 = u3 / u3_norm

    # The two rotation hypotheses are U W V^T and U W^T V^T
    W = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
    candidates = (U @ W @ Vt, U @ W.T @ Vt)

    R = np.zeros((3, 3, 2))
    for idx, candidate in enumerate(candidates):
        # Flip an improper rotation (det = -1) into a proper one
        R[:, :, idx] = candidate if np.linalg.det(candidate) >= 0 else -candidate

    return R, u3


def decompose_camera(P=None):
    """
    Decomposition of a camera projection matrix

    Args:
    P : array-like, shape (3, 4)
        Input camera projection matrix. If not provided, a default test matrix is used.
    Returns:
    K : array, shape (3, 3)
        Calibration matrix.
    Rc_w : array, shape (3, 3)
        Rotation matrix defining the world coordinate frame in terms of the camera frame.
    Pc : array, shape (3,)
        Camera centre position in world coordinates.
    pp : array, shape (2,)
        Image principal point.
    pv : array, shape (3,)
        Principal vector from the camera centre through pp pointing out from the camera.

    Reference: Hartley and Zisserman 2nd Ed. pp 155-164
    """
    if P is None:
        # Projection matrix from Hartley and Zisserman p 163 used for testing
        P = np.array(
            [
                [3.53553e2, 3.39645e2, 2.77744e2, -1.44946e6],
                [-1.03528e2, 2.33212e1, 4.59607e2, -6.32525e5],
                [7.07107e-1, -3.53553e-1, 6.12372e-1, -9.18559e2],
            ]
        )

    # Left 3x3 submatrix and its last row
    M = P[:, :3]
    m3 = M[2, :]

    # Camera centre, analytic solution: signed cofactors of the columns of P
    # give the homogeneous null vector (X, Y, Z, T) of the projection matrix.
    cofactors = [
        ((-1) ** i) * np.linalg.det(np.delete(P, i, axis=1)) for i in range(4)
    ]
    centre_h = np.array(cofactors)
    Pc = (centre_h / centre_h[3])[:3]

    # Principal point: dehomogenized projection of the principal axis
    pp_h = M @ m3
    pp = (pp_h / pp_h[2])[:2]

    # Principal ray pointing out of camera (unit length)
    pv = np.linalg.det(M) * m3
    pv = pv / np.linalg.norm(pv)

    # RQ decomposition M = K @ Rc_w
    K, Rc_w = rq3(M)

    # Warn when the recovered rotation is not right handed
    if np.dot(np.cross(Rc_w[:, 0], Rc_w[:, 1]), Rc_w[:, 2]) < 0:
        print("Note that rotation matrix is left handed")

    # note: t = -Rc_w @ Pc, Hartley and Zisserman pp.156
    return K, Rc_w, Pc, pp, pv


def rq3(M):
    """
    Perform RQ decomposition of a 3x3 matrix M, so that M = K @ R.

    Args:
    M : array-like, shape (3, 3)
        Input 3x3 matrix for RQ decomposition.

    Returns:
    K : array, shape (3, 3)
        Upper-triangular camera intrinsic matrix with positive diagonal.
    R : array, shape (3, 3)
        Orthogonal (rotation) matrix.

    Reference: https://ksimek.github.io/2012/08/14/decompose/
    """
    # RQ via QR of the row-reversed transpose
    q, r = np.linalg.qr(np.flipud(M).T)
    K = np.fliplr(np.flipud(r.T))
    R = np.flipud(q.T)

    # Force a positive diagonal on K; the paired sign flip on R keeps K @ R = M
    signs = np.diag(np.sign(np.diag(K)))
    return K @ signs, signs @ R


def computeRelativePose(p1, p2, K):
    """Estimate the relative pose (R, t) of camera 2 w.r.t. camera 1 from
    homogeneous point correspondences p1/p2 and a shared calibration K."""
    # Essential matrix from the calibrated correspondences (8-point + RANSAC)
    E = estimateEssentialMatrix(p1, p2, K, K)

    # Extract the four candidate extrinsic configurations (R, t) from E
    Rots, u3 = decomposeEssentialMatrix(E)

    # Keep the single configuration that places the points in front of
    # both cameras
    return disambiguateRelativePose(Rots, u3, p1, p2, K, K)


def skew(w):
    """
    Return the 3x3 skew-symmetric (cross-product) matrix of a 3-vector w,
    i.e. skew(w) @ x == np.cross(w, x).
    """
    wx, wy, wz = w[0], w[1], w[2]
    return np.array([
        [0, -wz, wy],
        [wz, 0, -wx],
        [-wy, wx, 0],
    ])


def SE3_to_se3(T):
    """
    Convert an SE(3) matrix to its se(3) vector representation via the
    logarithmic map (Log).
    Args:
    T: 4x4 SE(3) matrix
    Returns:
    ksi: 6-vector [v, w] with translational and rotational components
    """
    t = T[:3, 3]
    # Rotation vector (axis * angle) via scipy
    w = Rotation.from_matrix(T[:3, :3]).as_rotvec()
    theta = np.linalg.norm(w)

    if np.isclose(theta, 0):
        # Zero rotation: V^{-1} reduces to the identity
        v = t
    else:
        wx = skew(w)
        # Closed-form inverse of the SO(3) left Jacobian
        coeff = (1 / (theta ** 2)) * (
            1 - (theta * np.sin(theta)) / (2 * (1 - np.cos(theta)))
        )
        v = (np.eye(3) - 0.5 * wx + coeff * np.dot(wx, wx)) @ t

    return np.concatenate((v, w))


def se3_to_SE3(ksi):
    """
    Convert an se(3) vector to its SE(3) matrix representation via the
    exponential map (Exp).
    Args:
    ksi: 6-vector [v, w] with translational and rotational components
    Returns:
    T: 4x4 SE(3) matrix
    """
    v, w = ksi[:3], ksi[3:]
    theta = np.linalg.norm(w)

    T = np.eye(4)
    if np.isclose(theta, 0):
        # Zero rotation: the exponential reduces to a pure translation
        T[:3, 3] = v
        return T

    w_hat = skew(w)
    w_hat_sq = np.dot(w_hat, w_hat)

    # Rodrigues' rotation formula
    R = (
        np.eye(3)
        + (np.sin(theta) / theta) * w_hat
        + ((1 - np.cos(theta)) / (theta ** 2)) * w_hat_sq
    )
    # SO(3) left Jacobian V, mapping v to the translation part
    V = (
        np.eye(3)
        + ((1 - np.cos(theta)) / (theta ** 2)) * w_hat
        + ((theta - np.sin(theta)) / (theta ** 3)) * w_hat_sq
    )

    T[:3, :3] = R
    T[:3, 3] = V @ v
    return T


# def SE3_to_se3(T):
#     """
#     Convert given SE(3) matrix to se(3) vector representation
#     using the logarithmic map (Log).

#     Args:
#     T: 4x4 SE(3) matrix
#     Returns:
#     ksi: 6x1 se(3) vector
#     """
#     R = T[:3, :3]
#     theta = np.arccos((np.trace(R) - 1) / 2)
#     if np.isclose(theta, 0):
#         w = np.array([0, 0, 0])
#         v = T[:3, 3]
#         ksi = np.concatenate((v, w))
#     else:
#         w = (
#             theta
#             / (2 * np.sin(theta))
#             * np.array([R[2, 1] - R[1, 2], R[0, 2] - R[2, 0], R[1, 0] - R[0, 1]])
#         )
#         wx = np.array([[0, -w[2], w[1]], [w[2], 0, -w[0]], [-w[1], w[0], 0]])
#         t = T[:3, 3]
#         v = (
#             np.eye(3)
#             - 0.5 * wx
#             + (
#                 1 / (theta ** 2)
#                 * (1 - theta * np.sin(theta) / (2 * (1 - np.cos(theta))))
#                 * np.dot(wx, wx)
#             )
#         ) @ t
#         ksi = np.concatenate((v, w))
#     return ksi


# def se3_to_SE3(ksi):
#     """
#     Converts given se(3) vector to SE(3) matrix representation using the exponential map (Exp).
#     Args:
#     ksi: 6x1 se(3) vector
#     Returns:
#     T: 4x4 SE(3) matrix
#     """

#     # Splitting the twist vector into angular and translational velocity vectors
#     v = ksi[:3]
#     w = ksi[3:]

#     # Constructing the rotational exponential coordinates matrix
#     w_skewed = np.array([[0, -w[2], w[1]], [w[2], 0, -w[0]], [-w[1], w[0], 0]])

#     # Handling zero rotation
#     if np.allclose(w, 0):
#         R = np.eye(3)
#         t = v
#     else:
#         # Rodrigues formula
#         R = (
#             np.eye(3)
#             + (w_skewed / np.linalg.norm(w)) * np.sin(np.linalg.norm(w))
#             + ((np.dot(w_skewed, w_skewed)) / (np.linalg.norm(w) ** 2))
#             * (1 - np.cos(np.linalg.norm(w)))
#         )
#         # Computing the 3x1 translational vector (expression from ethan eade doc)
#         t = (
#             np.eye(3)
#             + (((1 - np.cos(np.linalg.norm(w))) / (np.linalg.norm(w) ** 2)) * w_skewed)
#             + (
#                 (
#                     (np.linalg.norm(w) - np.sin(np.linalg.norm(w)))
#                     / (np.linalg.norm(w) ** 3)
#                 )
#                 * (np.dot(w_skewed, w_skewed))
#             )
#         ) @ v
#     # Constructing the 4x4 transformation matrix
#     T = np.eye(4)
#     T[:3, :3] = R
#     T[:3, 3] = t

#     return T

 
def __test_decompose_camera():
    # Usage example: decompose the built-in Hartley & Zisserman test matrix
    results = decompose_camera()
    labels = (
        "Calibration matrix K:\n",
        "Rotation matrix Rc_w:\n",
        "Camera center position Pc:",
        "Image principal point pp:",
        "Principal vector pv:",
    )
    for label, value in zip(labels, results):
        print(label, value)


def __test_computeRelativePose():
    # Load matched keypoints and lift them to homogeneous coordinates
    pts1 = np.loadtxt("./matches0001.txt")
    pts2 = np.loadtxt("./matches0002.txt")
    pts1 = np.vstack((pts1, np.ones((1, pts1.shape[1]))))
    pts2 = np.vstack((pts2, np.ones((1, pts2.shape[1]))))

    K = np.array([[1379.74, 0, 760.35], [0, 1382.08, 503.41], [0, 0, 1]])

    # Expected result for the bundled data:
    # R:
    # array([[ 0.98061327, -0.0048195 , -0.19589381],
    #        [ 0.00445489,  0.99998743, -0.0023018 ],
    #        [ 0.19590244,  0.00138448,  0.98062241]])
    # t:
    # array([ 0.99949045,  0.00217848, -0.03184481])
    R, t = computeRelativePose(pts1, pts2, K)
    print("R:\n", R)
    print("t:\n", t)


def __test_SE3_to_se3():
    # Round trip: SE(3) -> se(3) -> SE(3) should reproduce the input pose
    _, R, Pc, _, _ = decompose_camera()
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = -R @ Pc  # t = -Rc_w @ Pc
    T_roundtrip = se3_to_SE3(SE3_to_se3(T))
    print("error:\n", T - T_roundtrip)


if __name__ == "__main__":
    # Compact float printing for easier visual inspection of the demo output
    np.set_printoptions(suppress=True, precision=6)
    # Other available demos (disabled):
    # __test_computeRelativePose()
    # __test_decompose_camera()
    __test_SE3_to_se3()
