import pupil_apriltags as apriltag
import cv2
import numpy as np
import sys,os
import math
from tqdm import tqdm

# Text-drawing parameters used when annotating detected tags on the image.
font = cv2.FONT_HERSHEY_SIMPLEX  # font face
font_scale = 0.5  # font scale factor
color = (255, 255, 0)  # text color, BGR order
thickness = 1  # stroke thickness in pixels
line_type = cv2.LINE_AA  # line type; LINE_AA = anti-aliased

# Load camera calibration from an OpenCV XML storage file.
carb_xml = 'para/realense_Para.xml'
out_path = "test_p"
tag_size = 2.4  # physical tag edge length; presumably same units as desired pose translation — confirm
fs = cv2.FileStorage(carb_xml, cv2.FILE_STORAGE_READ)
intrinsics = fs.getNode('camera-matrix').mat()  # 3x3 camera intrinsic matrix
distortion = fs.getNode('distortion').mat().T  # distortion coefficients, transposed to a row vector
R_to_0 = [[[1, 0, 0], [0, 1, 0], [0, 0, 1]], ]  # identity rotation; NOTE(review): appears unused in this file


def return_tags(dir="img/aruco"):
    """Detect AprilTags in every image under *dir*, annotate, and save results.

    Each readable image is undistorted with the module-level camera
    intrinsics, run through the AprilTag detector with pose estimation,
    annotated via detect_tags(), and written to ``out_path``.

    Args:
        dir: Directory containing the input images (default "img/aruco").
    """
    # Build the detector once — it is loop-invariant, and constructing it
    # per image was wasted work in the original.
    tag_detector = apriltag.Detector(nthreads=1,
                                     quad_decimate=1.0,
                                     quad_sigma=0.0,
                                     refine_edges=1,
                                     decode_sharpening=0.25,
                                     debug=0)

    # Make sure the output directory exists before any cv2.imwrite call.
    os.makedirs(out_path, exist_ok=True)

    for filename_path in tqdm(os.listdir(dir)):
        img = cv2.imread(f"{dir}/{filename_path}")
        if img is None:
            # cv2.imread returns None (no exception) for unreadable or
            # non-image files — skip them instead of crashing in cvtColor.
            continue

        # The detector requires a grayscale numpy.uint8 image.
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        cv2.imwrite("results/gray.png", img_gray)

        # Undistort and compute the new intrinsics of the rectified image.
        h, w = img_gray.shape[:2]
        new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(intrinsics, distortion, (w, h), 1, (w, h))
        # fx, fy, cx, cy — the order pupil_apriltags expects for camera_params.
        cameraParams_Intrinsic = [new_camera_matrix[0][0], new_camera_matrix[1][1],
                                  new_camera_matrix[0][2], new_camera_matrix[1][2]]
        gray = cv2.undistort(img_gray, intrinsics, distortion, None, new_camera_matrix)
        img = cv2.undistort(img, intrinsics, distortion, None, new_camera_matrix)

        tags = tag_detector.detect(gray,
                                   estimate_tag_pose=True,
                                   camera_params=cameraParams_Intrinsic,
                                   tag_size=tag_size)

        print("%d apriltags have been detected." % len(tags))
        detect_tags(img, tags, new_camera_matrix)
        cv2.waitKey(0)
        cv2.imwrite(f"{out_path}/{filename_path}", img)

def RotateByZ(Cx, Cy, thetaZ):
    """Rotate the point (Cx, Cy) about the Z axis by thetaZ degrees."""
    angle = thetaZ * math.pi / 180.0
    cos_a, sin_a = math.cos(angle), math.sin(angle)
    return cos_a * Cx - sin_a * Cy, sin_a * Cx + cos_a * Cy


def RotateByY(Cx, Cz, thetaY):
    """Rotate the point (Cx, Cz) about the Y axis by thetaY degrees."""
    angle = thetaY * math.pi / 180.0
    cos_a, sin_a = math.cos(angle), math.sin(angle)
    new_z = cos_a * Cz - sin_a * Cx
    new_x = sin_a * Cz + cos_a * Cx
    return new_x, new_z


def RotateByX(Cy, Cz, thetaX):
    """Rotate the point (Cy, Cz) about the X axis by thetaX degrees."""
    angle = thetaX * math.pi / 180.0
    cos_a, sin_a = math.cos(angle), math.sin(angle)
    return cos_a * Cy - sin_a * Cz, sin_a * Cy + cos_a * Cz


def detect_tags(img, tags, new_camera_matrix):
    """Annotate each detection in *tags* on *img* and print its details.

    For every tag: circle all four corners, write the tag id next to the
    second corner, and draw the pose axes via draw_axes(). The image is shown
    in a window only when at least one tag was detected.

    Args:
        img: BGR image, modified in place.
        tags: detection list from pupil_apriltags Detector.detect().
        new_camera_matrix: 3x3 intrinsics of the (already undistorted) image.
    """
    for tag in tags:
        # Circle the four corners. NOTE(review): the exact corner ordering
        # convention should be confirmed against the pupil_apriltags docs —
        # the original comments labeled them inconsistently.
        for corner in tag.corners:
            cv2.circle(img, tuple(corner.astype(int)), 4, (0, 0, 255), 1)
        text = f"{tag.tag_id}"
        cv2.putText(img, text, tuple(tag.corners[1].astype(int)), font, font_scale, color, thickness, line_type)
        draw_axes(img, tag.pose_R, tag.pose_t, new_camera_matrix)
        # Detection details for debugging.
        print("family:", tag.tag_family)
        print("id:", tag.tag_id)
        print("corners:", tag.corners)
        print("homography:", tag.homography)
        print("pose_R:%s\npose_T:%s\npose_err:%s" % (tag.pose_R, tag.pose_t, tag.pose_err))
    if len(tags) > 0:
        cv2.imshow("result", img)

def draw_axes(img, R, t, camera_matrix):
    """Project the tag's coordinate axes into the image and draw them as arrows."""
    # Axis endpoints in tag-local 3D coordinates: origin plus one point
    # half a tag-edge along each axis.
    half = tag_size / 2
    object_points = np.float32([[0, 0, 0],
                                [half, 0, 0],
                                [0, half, 0],
                                [0, 0, half]]).reshape(-1, 3)

    # Project onto the image plane (distortion coefficients omitted — the
    # image was undistorted upstream).
    projected, _ = cv2.projectPoints(object_points, R, t, camera_matrix, None)
    origin, x_end, y_end, z_end = [tuple(p) for p in projected.reshape(-1, 2).astype(int)]

    # Colors are BGR: X axis blue, Y axis green, Z axis red.
    cv2.arrowedLine(img, origin, x_end, (255, 0, 0), 2)
    cv2.arrowedLine(img, origin, y_end, (0, 255, 0), 2)
    cv2.arrowedLine(img, origin, z_end, (0, 0, 255), 2)


def solve(tags=None, camera_matrix=None):
    """Recover the camera pose from a single detected AprilTag via PnP.

    The original body referenced ``tags`` and ``new_camera_matrix``, neither
    of which exists in its scope, so every call raised NameError; the inputs
    are now explicit (optional, for signature compatibility) parameters.

    Args:
        tags: detection list from the AprilTag detector; only tags[0] is used.
        camera_matrix: 3x3 camera intrinsic matrix of the undistorted image.

    Returns:
        ((Cx, Cy, Cz), (thetaX, thetaY, thetaZ)): camera position in the
        world frame and Euler angles (degrees) of the camera rotation.

    Raises:
        ValueError: if either argument is missing.
    """
    if tags is None or camera_matrix is None:
        raise ValueError("solve() requires a detection list and a camera matrix")

    # AprilTag corner coordinates in the world coordinate system (Z = 0 plane).
    object_3d_points = np.array(([0, 0, 0],
                                 [0, 200, 0],
                                 [150, 0, 0],
                                 [150, 200, 0]),
                                dtype=np.double)

    # Matching corner coordinates in the image pixel system.
    object_2d_point = np.array((tags[0].corners[0].astype(int),
                                tags[0].corners[1].astype(int),
                                tags[0].corners[2].astype(int),
                                tags[0].corners[3].astype(int)),
                               dtype=np.double)

    # The image was undistorted upstream, so all coefficients are zero:
    # k1, k2, p1, p2, k3.
    dist_coefs = np.array([0, 0, 0, 0, 0], dtype=np.double)

    # solvePnP yields the rotation (rvec) and translation (tvec) mapping
    # world coordinates into the camera frame.
    found, rvec, tvec = cv2.solvePnP(object_3d_points, object_2d_point, camera_matrix, dist_coefs)
    rotM = cv2.Rodrigues(rvec)[0]

    # Euler angles (degrees) extracted from the rotation matrix.
    thetaZ = math.atan2(rotM[1, 0], rotM[0, 0]) * 180.0 / math.pi
    thetaY = math.atan2(-1.0 * rotM[2, 0], math.sqrt(rotM[2, 1] ** 2 + rotM[2, 2] ** 2)) * 180.0 / math.pi
    thetaX = math.atan2(rotM[2, 1], rotM[2, 2]) * 180.0 / math.pi

    # Undo the rotation on the translation to obtain the camera position.
    # tvec entries are 1-element arrays; convert to plain floats first.
    x = float(tvec[0])
    y = float(tvec[1])
    z = float(tvec[2])
    (x, y) = RotateByZ(x, y, -1.0 * thetaZ)
    (x, z) = RotateByY(x, z, -1.0 * thetaY)
    (y, z) = RotateByX(y, z, -1.0 * thetaX)
    Cx = x * -1
    Cy = y * -1
    Cz = z * -1

    print("camera position:", Cx, Cy, Cz)
    print("camera rotation:", thetaX, thetaY, thetaZ)
    return (Cx, Cy, Cz), (thetaX, thetaY, thetaZ)
if __name__ == '__main__':
    # Run detection on the default image directory, then keep any result
    # windows open until a key is pressed.
    return_tags()
    cv2.waitKey(0)
    cv2.destroyAllWindows()