import glob
import os
import cv2
import numpy as np
from scipy.spatial.transform import Rotation as R
import json
import kornia as kn
import torch
import rowan

# Marker positions on the tracked object, expressed in the object frame.
# Raw values appear to be millimetres; * 0.001 converts to metres —
# presumably matching the units of the vicon pose translations (TODO confirm
# against the pose .txt files).
p1 = np.array([74.1533, 26.0825, 0.751836]) * 0.001
p2 = np.array([-18.9287, 25.2773, 9.06192]) * 0.001
p3 = np.array([49.7518, -2.02081, 4.0981]) * 0.001
p4 = np.array([-52.3204, 15.8919, 6.59897]) * 0.001
p5 = np.array([-52.656, -65.2309, -20.5108]) * 0.001
# (5, 3) constellation of all markers, consumed by read_kpts3().
point_ob = np.array([p1, p2, p3, p4, p5])

# K = np.array([[3613.29265923574, 0, 1215.23428592292],
#               [0, 3611.73784615899, 1049.92746601733],
#               [0, 0, 1]])
# np.save("/media/liyuke/dataset/vicon4/intrinsic.npy",K)
# 3x3 camera intrinsic matrix, loaded from disk at import time (hard-coded path).
K = np.load("/media/liyuke/dataset/vicon3/intrinsic.npy")




def read_truth_Rt(labpath):
    """Read a ground-truth pose file and return its rotation and translation.

    The file is expected to hold ``key value`` pairs, one per line: lines 1-4
    carry the quaternion components in (x, y, z, w) order (as consumed by
    scipy's ``Rotation.from_quat``) and lines 5-7 the translation.

    Args:
        labpath: path to the pose ``.txt`` file.

    Returns:
        rotation_mat: (3, 3) rotation matrix.
        translation_mat: (3,) translation vector as a numpy array.

    Raises:
        ValueError: if the file is empty.
    """
    if not os.path.getsize(labpath):
        # The original fell through to returning names that were never
        # assigned (NameError); fail with a meaningful message instead.
        raise ValueError(f"empty pose file: {labpath}")
    with open(labpath, 'r') as f:
        contents = f.readlines()
    # float() instead of eval(): same result for numeric text, without
    # executing arbitrary file content.
    xyz = [float(contents[i].split(" ")[1].strip()) for i in range(5, 8)]
    quat = [float(contents[i].split(" ")[1].strip()) for i in range(1, 5)]
    r = R.from_quat(quat)
    rotation_mat = r.as_matrix()
    translation_mat = np.array(xyz)
    return rotation_mat, translation_mat


def read_kpts2(labpath):
    """Load 2-D keypoint labels from labelme-style JSON annotation files.

    For each file, the first point of every shape listed under
    ``data["shapes"]`` is collected into an (n_shapes, 2) array.

    Args:
        labpath: iterable of JSON file paths.

    Returns:
        list of numpy arrays, one (n_shapes, 2) array per input file.
    """
    labels = []
    for json_file in labpath:
        # Load the JSON annotation data.
        with open(json_file, 'r') as fh:
            shapes = json.load(fh)["shapes"]
        shape_points = np.array([shape["points"] for shape in shapes])
        # Keep only the first point of each shape.
        labels.append(shape_points[:, 0])
    return labels


def read_kpts3(labpath, points_obj=None):
    """Transform object-frame marker points into the vicon frame, per pose file.

    Each non-empty pose file supplies an object-to-vicon transform: lines 1-4
    hold the quaternion in (x, y, z, w) order and lines 5-7 the translation.
    The marker constellation is mapped through ``R @ p + t``.

    Args:
        labpath: iterable of pose ``.txt`` file paths; empty files are skipped.
        points_obj: optional (N, 3) marker points in the object frame;
            defaults to the module-level ``point_ob`` constellation, keeping
            the original call signature backward-compatible.

    Returns:
        list of (N, 3) arrays of vicon-frame points, one per non-empty file.
    """
    if points_obj is None:
        points_obj = point_ob
    labels = []
    for file in labpath:
        if os.path.getsize(file):
            with open(file, 'r') as f:
                contents = f.readlines()
            # float() instead of eval(): same parse, no arbitrary-code risk.
            ob2vicon_t = np.array([float(contents[i].split(" ")[1].strip()) for i in range(5, 8)])
            quat = [float(contents[i].split(" ")[1].strip()) for i in range(1, 5)]
            ob2vicon_R = R.from_quat(quat).as_matrix()
            # Map every marker: (3, 3) @ (3, N) + (3, 1) -> (3, N), then
            # transpose back to (N, 3) for the caller.
            points = np.dot(ob2vicon_R, points_obj.T) + ob2vicon_t.reshape(3, 1)
            labels.append(points.T)
    return labels


def project_2d(pts3d, vicon2camera_R, vicon2camera_t, K):
    """Project vicon-frame 3-D points into pixel coordinates.

    Args:
        pts3d: (N, 3) points in the vicon frame.
        vicon2camera_R: (3, 3) vicon-to-camera rotation.
        vicon2camera_t: (1, 3) vicon-to-camera translation (transposed
            internally to broadcast over the point columns).
        K: (3, 3) camera intrinsic matrix.

    Returns:
        (2, N) array of pixel coordinates.
    """
    cam_pts = vicon2camera_R @ pts3d.T + vicon2camera_t.T
    homogeneous = K @ cam_pts
    # Perspective divide by the depth row.
    return homogeneous[:2] / homogeneous[2]


def vicon2camera(root):
    """Estimate the vicon-to-camera extrinsics via PnP over labelled frames.

    Pairs every ``*.json`` 2-D annotation under ``root`` with a same-named
    ``*.txt`` pose file, stacks all correspondences, and solves PnP. If the
    recovered translation puts the scene behind the camera (negative z), the
    pose is composed with a 180-degree flip so the points land in front.

    Args:
        root: directory containing matching ``.json`` / ``.txt`` label files.

    Returns:
        A tuple ``(rotation, translation)``. NOTE(review): despite the local
        name ``angle_axis``, ``cv2.Rodrigues`` converts the 3-vector into a
        (3, 3) rotation matrix just before returning; callers treat it as a
        rotation matrix. ``translation`` is a (1, 3) numpy array.
    """
    kpts2names = glob.glob(root + "/*json")
    # Each JSON annotation is paired with its same-named pose .txt file.
    kpts3names = list(map(lambda x: x.replace('.json', '.txt'), kpts2names))
    kpts2 = read_kpts2(kpts2names)
    kpts3 = read_kpts3(kpts3names)
    # _, rvec, T, _ = cv2.solvePnPRansac(objectPoints=np.concatenate(kpts3), imagePoints=np.concatenate(kpts2), cameraMatrix=K, distCoeffs=None, flags=cv2.SOLVEPNP_ITERATIVE, useExtrinsicGuess=False)
    retval, rvec, T = cv2.solvePnP(objectPoints=np.concatenate(kpts3), imagePoints=np.concatenate(kpts2),
                                   cameraMatrix=K, distCoeffs=None)
    angle_axis = torch.tensor(rvec, dtype=torch.float).view(1, 3)
    T = torch.tensor(T, dtype=torch.float).view(1, 3)
    # -I: a 180-degree rotation about the camera origin (negates all axes).
    R_inv = torch.tensor([[-1, 0, 0], [0, -1, 0], [0, 0, -1]], dtype=torch.float)
    if T[0, 2] < 0:
        # Solution is behind the camera: compose the rotation with the flip,
        # then convert back to axis-angle via a quaternion round-trip.
        RR = kn.angle_axis_to_rotation_matrix(angle_axis)
        RR = R_inv.matmul(RR)
        # NOTE(review): rowan.to_axis_angle presumably returns (axes, angles),
        # so ax[0] * ax[1] scales the unit axis by the angle — confirm for the
        # installed rowan version and the batch shape kornia emits here.
        RR = rowan.from_matrix(RR.cpu(), require_orthogonal=False)
        ax = rowan.to_axis_angle(RR)
        angle_axis = torch.tensor(ax[0] * ax[1], dtype=torch.float).view(1, 3)
        # Apply the same flip to the translation.
        T = R_inv.matmul(T.t()).t()
    # Convert the rotation vector into a (3, 3) rotation matrix; the variable
    # name no longer reflects its contents after this line.
    angle_axis = cv2.Rodrigues(angle_axis.numpy())[0]
    # rotation_matrix, _ = cv2.Rodrigues(R)

    return angle_axis, T.numpy()


def test_extrinsic(img_path, label_path, vicon2camera_R, vicon2camera_t):
    """Visual sanity check: project labelled 3-D keypoints onto one frame.

    Reads the frame's 3-D keypoints, projects them with the given extrinsics
    and the module-level intrinsics ``K``, draws them as red dots on the
    image, and shows the result in a blocking OpenCV window.

    Args:
        img_path: path to the frame image.
        label_path: path to the frame's pose ``.txt`` file.
        vicon2camera_R: (3, 3) vicon-to-camera rotation.
        vicon2camera_t: (1, 3) vicon-to-camera translation.
    """
    kpts3d = read_kpts3([label_path])
    kpts2d = project_2d(kpts3d[0], vicon2camera_R, vicon2camera_t, K)
    img = cv2.imread(img_path)
    # Iterate over however many points were actually projected instead of a
    # hard-coded 5, so constellations of any size render correctly.
    for i in range(kpts2d.shape[1]):
        cv2.circle(img, (int(kpts2d[0][i]), int(kpts2d[1][i])), 3, (0, 0, 255), -1)
    cv2.imshow("A", img)
    cv2.waitKey()
    return


if __name__ == "__main__":
    # Solve the camera extrinsics from all labelled calibration frames.
    calib_root = "/media/liyuke/dataset/vicon3/vicon2camera/"
    # txt_rename(root)
    extrinsic_R, extrinsic_t = vicon2camera(calib_root)  # camera_vicon
    # np.save("/media/liyuke/dataset/vicon4/vicon2camera.npy",
    #         np.concatenate((vicon2camera_R, vicon2camera_t.reshape(3, 1)), axis=1))
    # Sanity-check the solution by reprojecting one frame's keypoints.
    sample_img = "//media/liyuke/dataset/vicon3/seq5/1736801828202685196.jpg"
    sample_label = "/media/liyuke/dataset/vicon3/seq5/1736801828202685196_pose.txt"
    test_extrinsic(sample_img, sample_label, extrinsic_R, extrinsic_t)
