from aprilgrid import Detector
import cv2
import numpy as np
from scipy.spatial.transform import Rotation
import sys
import math
from tqdm import tqdm

# from tools.read_pose import read_pose

# from tools import refence_record as record
# Calibration-board geometry. Units are presumably millimetres, matching the
# tag_size=15 / spacing=4.5 pair reused in aprilgrid_pose — TODO confirm.
houdu =5.0      # board thickness ("houdu" = pinyin for thickness) — TODO confirm
tag_size = 15.  # side length of one AprilTag square
spacing = 4.5   # gap between adjacent tags

# Offset from the grid origin (first tag's first corner) to the reference
# point used when drawing axes. NOTE(review): x/y look like the centre of a
# 3x3 grid (1.5 tags + 1 spacing); z is half the board thickness, negated
# below so the point sits inside/behind the board surface — verify intent.
offset_x=spacing+tag_size*1.5
offset_y=spacing+tag_size*1.5
offset_z=houdu/2

# Homogeneous 4x4 rigid transform (identity rotation, translation only)
# applied to the axes triad in draw_axes before projection.
offset=np.array([[1,0,0,offset_x],
                 [0,1,0,offset_y],
                 [0,0,1,-offset_z],
                 [0,0,0,1]])

def object_points_pair_gen(tag_size, spacing, tag_num, tags):
    """Build matched 2D/3D point sets for PnP from detected AprilGrid tags.

    For every detected tag, the four corners of a tag-sized square are
    placed on the z=0 board plane at the grid cell implied by ``tag_id``
    (``tag_num[1]`` columns, row-major), and paired with the tag's detected
    image corners.

    Parameters:
        tag_size: side length of one tag (board units, e.g. mm).
        spacing: gap between adjacent tags (same units).
        tag_num: grid dimensions; only tag_num[1] (columns) is used here.
        tags: detection results, each with ``tag_id`` and ``corners``.

    Returns:
        (image_points, board_points) — both stacked along axis 0, four rows
        per tag; two empty float64 arrays when ``tags`` is empty.
    """
    columns = tag_num[1]
    pitch = tag_size + spacing  # centre-to-centre distance between cells

    # Corner template for a single tag at the grid origin, z = 0.
    template = np.array(([0, 0, 0],
                         [tag_size, 0, 0],
                         [tag_size, tag_size, 0],
                         [0, tag_size, 0]),
                        dtype=np.double)

    board_chunks = []
    image_chunks = []
    for tag in tags:
        row, col = divmod(tag.tag_id, columns)
        cell = template.copy()
        cell[:, 0] += col * pitch
        cell[:, 1] += row * pitch
        board_chunks.append(cell)
        image_chunks.append(tag.corners)

    if not board_chunks:
        return np.array((), dtype=np.double), np.array((), dtype=np.double)

    return (np.concatenate(image_chunks, axis=0),
            np.concatenate(board_chunks, axis=0))


def rotation_matrix_to_euler_angles(R):
    """Convert a 3x3 rotation matrix to Euler angles via OpenCV.

    Builds the 3x4 projection matrix [R | 0] and lets
    cv2.decomposeProjectionMatrix extract the Euler angles.

    Parameters:
        R: 3x3 rotation matrix.

    Returns:
        The ``eulerAngles`` output of cv2.decomposeProjectionMatrix — a
        (3, 1) array in degrees.
    """
    # BUG FIX: the original also computed cv2.Rodrigues(R) into a local
    # that was never used — dead code removed.
    P = np.hstack((R, np.zeros((3, 1))))
    # Only the 7th output (eulerAngles) of the decomposition is needed.
    _, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(P)
    return euler_angles

def draw_axes(img, R, t, camera_matrix, tag_size, offset=offset):
    """Draw the board coordinate axes on *img* (modified in place).

    Projects an axes triad of length tag_size/2 from board space into the
    image using pose (R, t) and draws one arrow per axis.

    Parameters:
        img: BGR image to draw on.
        R: rotation (Rodrigues vector or matrix, as cv2.projectPoints accepts).
        t: translation vector of the board pose.
        camera_matrix: 3x3 intrinsics; distortion is passed as None because
            the image is expected to be undistorted already.
        tag_size: tag side length; axes are half this long.
        offset: optional 4x4 rigid transform applied to the triad before
            projection (defaults to the module-level board-centre offset);
            pass None to draw at the grid origin.
    """
    axis_length = tag_size / 2

    # Origin plus one endpoint per axis, in board coordinates.
    axis_points = np.float32([[0, 0, 0],
                              [axis_length, 0, 0],
                              [0, axis_length, 0],
                              [0, 0, axis_length]]).reshape(-1, 3)

    if offset is not None:
        # Rigidly move the triad: rotate then translate.
        axis_points = np.dot(offset[:3, :3], axis_points.T).T + offset[:3, 3]

    img_points, _ = cv2.projectPoints(axis_points, R, t, camera_matrix, None)
    img_points = img_points.reshape(-1, 2).astype(int)

    # BUG FIX: OpenCV colours are BGR. The original passed (255,0,0) for X
    # while its comment said "red" — that tuple is blue in BGR. Colours are
    # corrected to the conventional X=red, Y=green, Z=blue the comments
    # intended.
    cv2.arrowedLine(img, tuple(img_points[0]), tuple(img_points[1]), (0, 0, 255), 2)  # X axis (red)
    cv2.arrowedLine(img, tuple(img_points[0]), tuple(img_points[2]), (0, 255, 0), 2)  # Y axis (green)
    cv2.arrowedLine(img, tuple(img_points[0]), tuple(img_points[3]), (255, 0, 0), 2)  # Z axis (blue)

def draw_circle(img, tags):
    """Mark the four detected corners of every tag on *img*.

    Draws a small red (BGR 0,0,255) circle outline, radius 4, thickness 1,
    at each corner point. The image is modified in place and also returned.
    """
    for tag in tags:
        # Each tag contributes exactly four corner points.
        for corner in tag.corners[:4]:
            center = tuple(corner.astype(int).squeeze())
            cv2.circle(img, center, 4, (0, 0, 255), 1)
    return img


def aprilgrid_pose(image, intrinsics, distortion, filename, robot=0, tag_type=0):
    """Estimate the AprilGrid board pose from one camera frame.

    Undistorts the frame, detects t36h11 tags, solves PnP against the known
    board geometry, draws debug output under ``results/`` and saves the pose
    as a flattened 4x4 matrix.

    Parameters:
        image: BGR input frame (drawn on in place for debug output).
        intrinsics: 3x3 camera matrix.
        distortion: distortion coefficients for cv2.undistort.
        filename: source image file name, used for the debug/pose file names.
        robot: kept for interface compatibility; with robot == 0 the Euler
            angles are (re)derived from the rotation part of the pose.
        tag_type: board layout selector; only 0 (3x3 grid, 15 mm tags,
            4.5 mm spacing) is supported.

    Returns:
        (transformation_matrix, euler_angles): 4x4 camera-from-board pose
        with translation scaled to metres, and the (3, 1) Euler angles in
        degrees. On PnP failure returns (np.eye(4), np.zeros(3)).

    Raises:
        ValueError: for an unsupported ``tag_type`` (the original fell
            through to a NameError on ``tag_size``).
    """
    if tag_type == 0:
        tag_size = 15
        spacing = 4.5
        tag_num = (3, 3)
    else:
        raise ValueError(f"unsupported tag_type: {tag_type}")

    tag_detector = Detector(tag_family_name="t36h11")
    img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    h, w = img_gray.shape[:2]
    new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(intrinsics, distortion, (w, h), 1, (w, h))
    gray = cv2.undistort(img_gray, intrinsics, distortion, None, new_camera_matrix)
    cv2.imwrite(f"results/gray/gray_{filename}", gray)

    tags = tag_detector.detect(gray)
    # BUG FIX: the original drew on the module-level global ``img`` (only
    # worked because __main__ happened to define it); use the parameter.
    draw_circle(image, tags)
    cv2.imwrite(f"results/circle/detect_{filename}", image)

    object_2d_points_all, object_3d_points_all = object_points_pair_gen(tag_size, spacing, tag_num, tags)
    try:
        # Distortion is None: points come from the undistorted image.
        _, rvec, tvec = cv2.solvePnP(object_3d_points_all, object_2d_points_all, new_camera_matrix, None)
    except cv2.error:  # was a bare except: — narrow to OpenCV failures
        cv2.imwrite(f"results/bad/pose_{filename}", image)
        return np.eye(4), np.zeros(3)

    draw_axes(image, rvec, tvec, new_camera_matrix, tag_size)
    cv2.imwrite(f"results/axes/pose_{filename}", image)

    Ra, _ = cv2.Rodrigues(rvec)
    np.set_printoptions(precision=3, suppress=True)
    transformation_matrix = np.eye(4)
    transformation_matrix[:3, :3] = Ra
    # tvec is in board units (presumably mm) — scale to metres.
    transformation_matrix[:3, 3] = tvec.reshape(3) * 0.001
    # NOTE: the original computed the angles twice (once from Ra, once from
    # transformation_matrix[:3, :3], the same matrix) — done once here.
    arca = rotation_matrix_to_euler_angles(Ra)
    # NOTE(review): pose_path is a module-level global defined under
    # __main__ — this function requires it to exist.
    np.savetxt(f"{pose_path}{filename.split('.')[0]}.txt",
               transformation_matrix.reshape(1, -1), fmt='%.6f')

    return transformation_matrix, arca


if __name__ == '__main__':
    import os

    pose_path = 'results/poses/'
    carb_xml = 'para/realense_Para.xml'

    # BUG FIX: the original opened a new cv2.FileStorage on every loop
    # iteration and never released it; the intrinsics are constant, so read
    # them once up front.
    fs_side = cv2.FileStorage(carb_xml, cv2.FILE_STORAGE_READ)
    intrinsics = fs_side.getNode('camera-matrix').mat()
    distortion = fs_side.getNode('distortion').mat().T
    fs_side.release()

    # Renamed loop variable: ``iter`` shadowed the builtin.
    for idx, filename in tqdm(enumerate(os.listdir("img/aruco"))):
        img = cv2.imread(f"img/aruco/{filename}")
        image = img

        transformation_matrix, arca = aprilgrid_pose(image, intrinsics, distortion, filename)
        rx, ry, rz = arca.reshape(3)
        x, y, z = transformation_matrix[:3, 3].reshape(3)
        # NOTE(review): ``rotation`` is built but unused in the visible
        # code — presumably consumed further down or left from debugging.
        rotation = Rotation.from_euler('xyz', (rx, ry, rz), degrees=True)
