import numpy as np
from scipy.linalg import logm
# from mathutils import Euler, Matrix, Vector

import transforms3d
from transforms3d.quaternions import qmult, qinverse,mat2quat
from transforms3d.euler import  mat2euler, euler2quat,euler2mat

# Convert a (Halcon-convention) pose matrix to Euler angles in degrees.
def se3_mat2euler(pose):
    """Decompose the rotation part of *pose* into z-y-x Euler angles.

    :param pose: matrix whose top-left 3x3 block is a rotation matrix.
    :return: array of the three angles, converted to degrees.
    """
    angles_rad = mat2euler(pose[:3, :3], axes='szyx')
    return np.rad2deg(angles_rad)
        
# euler2mat: build a (Halcon-convention) pose matrix from Euler angles and a translation.
def se3_euler2mat(theta, t):
    """Compose a 4x4 affine matrix from Euler angles and a translation.

    :param theta: sequence (rx, ry, rz) of Euler angles; applied as z-y-x.
        NOTE(review): euler2mat expects radians, while se3_mat2euler returns
        degrees -- confirm that callers convert between the two.
    :param t: 3-vector translation.
    :return: 4x4 affine matrix with unit scale (zooms of 1).
    """
    rx, ry, rz = theta[0], theta[1], theta[2]
    rotation = euler2mat(rz, ry, rx, axes='szyx')
    translation = np.array(t)
    return transforms3d.affines.compose(translation, rotation, np.ones(3))
    
# RT is a 3x4 matrix
def se3_inverse(RT):
    """Invert a rigid transform given as a 3x4 [R|t] matrix.

    The inverse of x -> Rx + t is x -> R^T x - R^T t.

    :param RT: 3x4 (or larger) matrix; rows 0-2, columns 0-3 are used.
    :return: 3x4 float32 matrix holding the inverse transform.
    """
    rot = RT[0:3, 0:3]
    trans = RT[0:3, 3]
    inverse = np.zeros((3, 4), dtype=np.float32)
    rot_t = rot.transpose()
    inverse[0:3, 0:3] = rot_t
    inverse[0:3, 3] = -rot_t.dot(trans)
    return inverse

def se3_mul(RT1, RT2):
    """Compose two rigid transforms given as 3x4 [R|t] matrices.

    Returns the transform equivalent to applying RT2 first, then RT1:
    R = R1 R2, t = R1 t2 + t1.

    :param RT1: outer 3x4 transform.
    :param RT2: inner 3x4 transform.
    :return: 3x4 float32 matrix of the composition.
    """
    rot1, trans1 = RT1[0:3, 0:3], RT1[0:3, 3]
    rot2, trans2 = RT2[0:3, 0:3], RT2[0:3, 3]

    composed = np.zeros((3, 4), dtype=np.float32)
    composed[0:3, 0:3] = rot1.dot(rot2)
    composed[0:3, 3] = rot1.dot(trans2) + trans1
    return composed

def se3_mat2quat(pose):
    """Return the quaternion of the rotation part of a pose matrix.

    :param pose: matrix whose top-left 3x3 block is a rotation matrix.
    :return: quaternion as produced by transforms3d's mat2quat (w, x, y, z).
    """
    rotation = pose[:3, :3]
    return mat2quat(rotation)




def egocentric2allocentric(qt, T):
    """Convert an egocentric rotation to its allocentric equivalent.

    Removes the viewpoint-dependent rotation induced by the object's
    off-center position T relative to the camera.

    :param qt: rotation quaternion (transforms3d convention, w first).
    :param T: 3-vector translation of the object in camera coordinates.
    :return: allocentric rotation quaternion.
    """
    azimuth = np.arctan2(T[0], -T[2])
    elevation = np.arctan2(T[1], -T[2])
    view_quat = euler2quat(-elevation, -azimuth, 0, axes='sxyz')
    return qmult(qinverse(view_quat), qt)


def allocentric2egocentric(qt, T):
    """Convert an allocentric rotation to its egocentric equivalent.

    Inverse of egocentric2allocentric: re-applies the viewpoint-dependent
    rotation for the object's position T in camera coordinates.

    :param qt: rotation quaternion (transforms3d convention, w first).
    :param T: 3-vector translation of the object in camera coordinates.
    :return: egocentric rotation quaternion.
    """
    azimuth = np.arctan2(T[0], -T[2])
    elevation = np.arctan2(T[1], -T[2])
    view_quat = euler2quat(-elevation, -azimuth, 0, axes='sxyz')
    return qmult(view_quat, qt)


def T_inv_transform(T_src, T_tgt):
    """Delta between two translations in projected (pixel-like) space.

    The x/y components are differences of the perspective-divided
    coordinates; the z component is the log of the depth ratio.

    :param T_src: source 3-vector translation (z must be non-zero).
    :param T_tgt: target 3-vector translation (z must be non-zero).
    :return: length-3 float32 array [dx, dy, log(z_src / z_tgt)].
    """
    x_src, y_src, z_src = T_src[0], T_src[1], T_src[2]
    x_tgt, y_tgt, z_tgt = T_tgt[0], T_tgt[1], T_tgt[2]

    delta = np.zeros((3,), dtype=np.float32)
    delta[0] = x_tgt / z_tgt - x_src / z_src
    delta[1] = y_tgt / z_tgt - y_src / z_src
    delta[2] = np.log(z_src / z_tgt)
    return delta

def rotation_x(theta):
    """Rotation matrix about the x axis.

    :param theta: angle in degrees.
    :return: 3x3 float32 rotation matrix.
    """
    rad = theta * np.pi / 180.0
    cos_t, sin_t = np.cos(rad), np.sin(rad)
    rot = np.zeros((3, 3), dtype=np.float32)
    rot[0, 0] = 1
    rot[1, 1] = cos_t
    rot[1, 2] = -sin_t
    rot[2, 1] = sin_t
    rot[2, 2] = cos_t
    return rot

def rotation_y(theta):
    """Rotation matrix about the y axis.

    :param theta: angle in degrees.
    :return: 3x3 float32 rotation matrix.
    """
    rad = theta * np.pi / 180.0
    cos_t, sin_t = np.cos(rad), np.sin(rad)
    rot = np.zeros((3, 3), dtype=np.float32)
    rot[0, 0] = cos_t
    rot[0, 2] = sin_t
    rot[1, 1] = 1
    rot[2, 0] = -sin_t
    rot[2, 2] = cos_t
    return rot

def rotation_z(theta):
    """Rotation matrix about the z axis.

    :param theta: angle in degrees.
    :return: 3x3 float32 rotation matrix.
    """
    rad = theta * np.pi / 180.0
    cos_t, sin_t = np.cos(rad), np.sin(rad)
    rot = np.zeros((3, 3), dtype=np.float32)
    rot[0, 0] = cos_t
    rot[0, 1] = -sin_t
    rot[1, 0] = sin_t
    rot[1, 1] = cos_t
    rot[2, 2] = 1
    return rot


def inverse_matrix_world(matrix_4x4):
    """
    Invert a 4x4 rigid transform [R|t; 0 0 0 1].

    Uses the closed form for rigid transforms: inverse rotation is R^T and
    inverse translation is -R^T t. The original hand-unrolled every element
    of R^T and R^T t; numpy's transpose/dot do the same with far less room
    for index errors.

    :param matrix_4x4: 4x4 matrix whose top-left 3x3 block is a rotation.
    :return: 4x4 numpy array holding the inverse transform.
    """
    rotation = matrix_4x4[:3, :3]
    translation = matrix_4x4[:3, 3]
    matrix_world_inverse = np.eye(4)
    matrix_world_inverse[:3, :3] = rotation.T
    matrix_world_inverse[:3, 3] = -rotation.T.dot(translation)
    return matrix_world_inverse

# NOTE: opencv2opengl below depends on mathutils (Matrix, Vector) and is kept
# commented out until that dependency is available.
'''
def opencv2opengl(cam_matrix_world):
    """
    Change coordinate system from OpenCV to OpenGL or from OpenGL to OpenCV
    """
    from scipy.spatial.transform import Rotation as R
    rot180x = R.from_euler('x', 180, degrees=True).as_matrix()
    rotation = cam_matrix_world[:3, :3]
    translation = cam_matrix_world[:3, 3]
    output = np.copy(cam_matrix_world)
    output[:3, :3] = np.asarray(Matrix(rot180x) @ Matrix(rotation).to_3x3())
    output[:3, 3] = np.asarray(Matrix(rot180x) @ Vector(translation))
    return output

'''

def get_camera_location_from_obj_pose(obj_pose):
    """Unit vector from the object toward the camera.

    Computes R^T (-t) for the pose's rotation R and translation t, then
    normalizes it to unit length.

    :param obj_pose: pose matrix; only the [R|t] part ([:3, :4]) is used.
    :return: length-3 unit vector.
    """
    rotation = obj_pose[:3, :3]
    translation = obj_pose[:3, 3]
    direction = rotation.T.dot(-translation)
    return direction / np.linalg.norm(direction)


def look_at(location):
    """
    Get object pose from a viewpoint location.
    # Adapted from https://github.com/thodan/bop_toolkit/blob/master/bop_toolkit_lib/view_sampler.py#L216
    IMPORTANT: output of this function is the object pose defined in OPENGL coordinate convention

    :param location: 3-vector viewpoint location (any numeric sequence).
    :return: 4x4 object pose matrix.
    """
    # BUGFIX: cast to float up front -- with an integer `location` the
    # original's in-place `f /= norm` raised a casting TypeError.
    location = np.asarray(location, dtype=np.float64)
    f = -location  # Forward direction.
    f /= np.linalg.norm(f)

    u = np.array([0.0, 0.0, 1.0])  # Up direction.
    s = np.cross(f, u)  # Side direction.
    if np.count_nonzero(s) == 0:
        # f and u are parallel, i.e. we are looking along or against Z axis.
        s = np.array([1.0, 0.0, 0.0])
    s /= np.linalg.norm(s)
    u = np.cross(s, f)  # Recompute up.
    R = np.array([[s[0], s[1], s[2]],
                  [u[0], u[1], u[2]],
                  [-f[0], -f[1], -f[2]]])
    obj_pose = np.zeros((4, 4))
    obj_pose[:3, :3] = R
    # The original computed t = -R.dot(location) and then stored -t; that
    # double negation is simply R.dot(location).
    obj_pose[:3, 3] = R.dot(location)
    obj_pose[3, 3] = 1
    return obj_pose


def _opencv_opengl_flip(pose_4x4):
    # Rotating 180 degrees about x (i.e. multiplying by diag(1, -1, -1))
    # converts a pose between the OpenCV and OpenGL camera conventions;
    # the flip is its own inverse so it works in both directions.
    flip = np.diag([1.0, -1.0, -1.0])
    out = np.copy(pose_4x4)
    out[:3, :3] = flip.dot(pose_4x4[:3, :3])
    out[:3, 3] = flip.dot(pose_4x4[:3, 3])
    return out


def remove_inplane_rotation(opencv_pose, return_symmetry_rot=False):
    """
    Return *opencv_pose* with its in-plane (roll) rotation removed, by
    rebuilding the rotation from a look-at toward the camera location.

    :param opencv_pose: 4x4 object pose in OpenCV convention.
    :param return_symmetry_rot: if True, also return the pose for the
        180-degree-symmetric camera location (x and y negated).
    :return: the roll-free pose, or a (pose, symmetric_pose) tuple.
    """
    cam_location = get_camera_location_from_obj_pose(opencv_pose)
    obj_opengl_pose = look_at(cam_location)
    # BUGFIX: the original called opencv2opengl(), which only exists inside a
    # commented-out block (it needed mathutils) and raised NameError at
    # runtime; the conversion is just the 180-degree flip about x, inlined.
    opencv_pose_wo_inplane = _opencv_opengl_flip(obj_opengl_pose)
    opencv_pose_wo_inplane[:3, 3] = opencv_pose[:3, 3]  # do not change the translation
    if return_symmetry_rot:
        # BUGFIX: copy before negating so cam_location is not mutated in place.
        opposite_cam_location = cam_location.copy()
        opposite_cam_location[:2] *= -1
        obj_opengl_pose_opposite = look_at(opposite_cam_location)
        opencv_pose_wo_inplane_opposite = _opencv_opengl_flip(obj_opengl_pose_opposite)
        opencv_pose_wo_inplane_opposite[:3, 3] = opencv_pose[:3, 3]  # do not change the translation
        return opencv_pose_wo_inplane, opencv_pose_wo_inplane_opposite
    else:
        return opencv_pose_wo_inplane


def perspective(K, obj_pose, pts):
    """
    Project 3D points into pixel coordinates.

    The original looped over the points, re-reading R and T from the pose on
    every iteration; this version hoists the invariants and projects all
    points in one vectorized pass while preserving the integer truncation of
    the per-point np.int32() casts.

    :param K: 3x3 camera intrinsics matrix.
    :param obj_pose: pose matrix; only the [R|t] part ([:3, :4]) is used.
    :param pts: (N, 3) array-like of 3D points (N may be 0).
    :return: (N, 2) float array whose entries are the projected pixel
             coordinates truncated to integers.
    """
    R, T = obj_pose[:3, :3], obj_pose[:3, 3]
    pts = np.asarray(pts, dtype=np.float64).reshape(-1, 3)
    cam = R.dot(pts.T) + T.reshape(3, 1)   # (3, N) points in the camera frame
    rep = K.dot(cam)                       # homogeneous pixel coordinates
    uv = rep[:2] / rep[2]                  # perspective divide
    results = np.zeros((pts.shape[0], 2))
    results[:] = uv.T.astype(np.int32)     # truncate toward zero, like np.int32()
    return results


def geodesic_numpy(R1, R2):
    """Geodesic distance between two rotation matrices, in degrees.

    Uses the identity cos(angle) = (trace(R2 R1^T) - 1) / 2, clipping the
    cosine into [-1, 1] to guard against floating-point drift.

    :param R1: 3x3 rotation matrix.
    :param R2: 3x3 rotation matrix.
    :return: angular distance in degrees.
    """
    cos_angle = 0.5 * (np.trace(R2.dot(R1.T)) - 1.0)
    cos_angle = np.clip(cos_angle, -1, 1)
    return np.degrees(np.arccos(cos_angle))

if __name__ == '__main__':
    # Quick sanity check that rotation_x produces a proper rotation matrix:
    # R R^T should be the identity, det(R) should be 1, and inv(R) == R^T.
    rot = rotation_x(30)
    print(rot)
    print(rot.dot(rot.T))
    print(np.linalg.det(rot))
    print(np.linalg.inv(rot))
    print(np.linalg.inv(rot).dot(rot))