import os
import cv2
import glob
import numpy as np
from scipy.spatial.transform import Rotation as R

# Rigid body 1: five marker points whose pose was captured.
# Coordinates are in millimetres in the capture system; * 0.001 converts to metres.
p1 = np.array([74.1533, 26.0825, 0.751836]) * 0.001
p2 = np.array([-18.9287, 25.2773, 9.06192]) * 0.001
p3 = np.array([49.7518, -2.02081, 4.0981]) * 0.001
p4 = np.array([-52.3204, 15.8919, 6.59897]) * 0.001
p5 = np.array([-52.656, -65.2309, -20.5108]) * 0.001
point_ob = np.array([p1, p2, p3, p4, p5])
# Camera intrinsic matrix; used as K @ points (3xN) below, so expected 3x3.
K = np.load("/media/liyuke/dataset/vicon4/intrinsic.npy")

# Rigid body 2: coordinates computed to the key points (mm -> m).
k1 = np.array([-82.7474, -14.8933, 1.61748]) * 0.001
k2 = np.array([-74.9785, 39.4116, 7.68901]) * 0.001
k3 = np.array([-89.0516, -117.471, -4.27828]) * 0.001
k4 = np.array([91.4125, -127.425, -20.9297]) * 0.001
k5 = np.array([125.815, 95.5882, 1.93138]) * 0.001
k6 = np.array([-52.6015, 135.736, 22.0145]) * 0.001
k7 = np.array([82.1518, -10.9465, -8.044391]) * 0.001
point_ob2 = np.array([k1, k2, k3, k4, k5, k6, k7])

# Rigid body 3: the four propellers (mm -> m).
f1 = np.array([-84.5098,-255.104,-11.8674])* 0.001
f2 = np.array([133.309,49.6186,-13.2071])* 0.001
f3 = np.array([-41.7997,208.485,25.0745])* 0.001
f4 = np.array([-286.518,-143.988,18.9697])* 0.001
point_ob3 = np.array([f1,f2,f3,f4])
# Rigid body 4: the four landing-gear legs (mm -> m).
d1 = np.array([122.893,104.79,5.31562])* 0.001
d2 = np.array([-66.3829,155.768,-1.37663])* 0.001
d3 = np.array([-123.69,-113.278,-6.76084])* 0.001
d4 = np.array([67.1808,-147.281,2.82149])* 0.001
point_ob4 = np.array([d1,d2,d3,d4])

# obtain 6D pose,including rotation and translation
def read_truth_Rt(labpath):
    if os.path.getsize(labpath):
        with open(labpath, 'r') as f:
            contents = f.readlines()
        uav2vicon_translation = np.array([eval(contents[i].split(" ")[1].strip()) for i in range(5, 8)])
        quat = [eval(contents[i].split(" ")[1].strip()) for i in range(1, 5)]
        r = R.from_quat(quat)
        uav2vicon_rotation = r.as_matrix()
        uav2vicon_T = np.concatenate((uav2vicon_rotation, uav2vicon_translation.reshape(3, 1)), axis=1)
        uav2vicon_T_h = np.concatenate((uav2vicon_T, np.array([[0, 0, 0, 1]])), axis=0)

        # transformation matrix obtained by reprojection error minimization method
        vicon2camera_T = np.load("/media/liyuke/dataset/vicon4/vicon2camera.npy")
        vicon2camera_T_h = np.concatenate((vicon2camera_T,np.array([[0,0,0,1]])),axis=0)
        # vicon2camera_T_h = np.array([[0.04921299, 0.99851924, 0.02318229, 0.51932544],
        #                              [0.09646206, 0.01835025, -0.9951675, 0.4418432],
        #                              [-0.9941193, 0.05121138, -0.09541615, 3.4678586],
        #                              [0, 0, 0, 1]])

        # transformation matrix: MAV coordinate frame with respect to camera coordinate frame
        # uav2camera_T_h = uav2vicon_T_h.dot(vicon2camera_T_h)
        uav2camera_T_h = vicon2camera_T_h.dot(uav2vicon_T_h)
        rotation_mat = uav2camera_T_h[:3, :3]
        print("rotation matrix = {}".format(uav2camera_T_h[:3, :3]))
        translation_mat = uav2camera_T_h[:3, 3].reshape(3, 1)
        print("translation matrix = {}".format(uav2camera_T_h[:3, 3] * 1000))

    return rotation_mat, translation_mat


def test_extrinsic(img_path, rotation_matrix, translation_matrix):
    """Reproject the module-level marker points into an image and display it.

    Parameters:
        img_path: path of the image to draw on.
        rotation_matrix: 3x3 rotation, MAV frame -> camera frame.
        translation_matrix: 3x1 translation, MAV frame -> camera frame.

    Blocks on cv2.waitKey() until a key is pressed; returns nothing.
    """
    # Transform all markers into the camera frame: X_c = R @ X + t.
    p_camera = np.dot(rotation_matrix, point_all.T) + translation_matrix
    # Pinhole projection followed by perspective division.
    p_img = np.dot(K, p_camera)
    p_img = p_img[:2, :] / p_img[2,]
    img = cv2.imread(img_path)
    # Draw every projected point.  The original hard-coded range(20), which
    # silently breaks if the marker set changes size.
    for i in range(p_img.shape[1]):
        cv2.circle(img, (int(p_img[0][i]), int(p_img[1][i])), 3, (0, 0, 255), -1)
    cv2.imshow("A", img)
    cv2.waitKey()


def ob2_ob1_coord():
    """Express the rigid-body-3 marker points in rigid body 1's frame.

    Reads the Vicon poses of the two rigid bodies from text files, builds
    the relative transform T_21 = inv(T_ob1->vicon) @ T_ob2->vicon, and
    applies it to the module-level ``point_ob3`` markers.

    NOTE(review): despite the "ob2" variable names, this function transforms
    ``point_ob3`` (the propeller markers) -- consistent with the obj3.txt
    paths below, but confirm the naming against the data-collection notes.

    Returns:
        (N, 3) array of the points expressed in rigid body 1's frame.
    """
    ob2_path = "/media/liyuke/dataset/vicon/sub_camera/obj3/obj3.txt"
    ob1_path = "/media/liyuke/dataset/vicon/sub_camera/obj3/obj1.txt"

    def _read_pose(path):
        # Parse "<name> <value>" lines: lines 1-4 quaternion, lines 5-7
        # translation.  float() instead of eval(): values are plain numbers
        # and eval on file contents is unsafe.
        with open(path, "r") as f:
            lines = f.readlines()
        translation = np.array([float(lines[i].split(" ")[1].strip()) for i in range(5, 8)])
        quat = [float(lines[i].split(" ")[1].strip()) for i in range(1, 5)]
        return R.from_quat(quat).as_matrix(), translation

    # Homogeneous transform: rigid body 2 -> Vicon.
    rot2, trans2 = _read_pose(ob2_path)
    ob22vicon_T_h = np.eye(4)
    ob22vicon_T_h[:3, :3] = rot2
    ob22vicon_T_h[:3, 3] = trans2

    # Inverse transform: Vicon -> rigid body 1 (R^T, -R^T t).
    rot1, trans1 = _read_pose(ob1_path)
    T_vicon2ob1 = np.eye(4)
    T_vicon2ob1[:3, :3] = rot1.T
    T_vicon2ob1[:3, 3] = -np.dot(rot1.T, trans1)

    T_21 = np.dot(T_vicon2ob1, ob22vicon_T_h)
    # Homogeneous coordinates sized from the data (the original hard-coded 4).
    points_h = np.concatenate((point_ob3, np.ones((point_ob3.shape[0], 1))), axis=1)
    point_ob2_in1 = np.dot(T_21, points_h.T)[:3].T
    # np.save("/media/liyuke/dataset/vicon/sub_camera/ob3.npy",point_ob2_in1)
    return point_ob2_in1

# Pre-computed marker coordinates of all four rigid bodies, already expressed
# in rigid body 1's frame (presumably saved by ob2_ob1_coord-style runs --
# TODO confirm provenance of these .npy files).
point_ob1_in1 = np.load("/media/liyuke/dataset/vicon4/ob1.npy")
point_ob2_in1 = np.load("/media/liyuke/dataset/vicon4/ob2.npy")
point_ob3_in1 = np.load("/media/liyuke/dataset/vicon4/ob3.npy")
point_ob4_in1 = np.load("/media/liyuke/dataset/vicon4/ob4.npy")
# Stacked (N, 3) point set used by test_extrinsic; assumed 5 + 7 + 4 + 4 = 20
# points to match the marker definitions above -- confirm file contents.
point_all = np.concatenate((point_ob1_in1,point_ob2_in1,point_ob3_in1,point_ob4_in1),axis=0)


if __name__ == "__main__":
    # For every "*_pose.txt" ground-truth file in the sequence, recover the
    # MAV-to-camera pose and overlay the reprojected marker points on the
    # corresponding image for visual verification.
    root = "/media/liyuke/dataset/vicon4/seq1/"
    for pose_file in glob.glob(root + "*_pose.txt"):
        # uav2camera
        rot, trans = read_truth_Rt(pose_file)
        image_path = pose_file.replace("_pose.txt", ".jpg")
        test_extrinsic(image_path, rot, trans)
