import os
import cv2
from math import *
import numpy as np
import math
'''
Hand-eye calibration utilities.

Accepts either a directory of calibration-board images or precomputed
camera extrinsics. Using a different camera requires updating the
intrinsic matrix and distortion coefficients; likewise, a different
calibration board requires updating the board parameters.
'''
class Calibration:
    """Eye-in-hand calibration (camera mounted on the robot gripper).

    Detects a chessboard in each calibration image, pairs the resulting
    board->camera transforms with the corresponding robot poses, and
    solves for the camera mounting transform with ``cv2.calibrateHandEye``.
    Using a different camera requires updating ``K`` / ``distortion``;
    a different board requires updating the ``target_*`` parameters.
    """

    def __init__(self):
        # Camera intrinsic matrix (pixel units).
        self.K = np.array([[1370.2, 0.0, 971.1372],
                           [0.0, 1370.1, 537.8686],
                           [0.0, 0.0, 1.0]], dtype=np.float64)
        # Distortion coefficients (k1, k2, p1, p2, k3).
        self.distortion = np.array([[0.0635, 0.0838, 0.0, 0.0, 0]])
        # Chessboard inner-corner grid size and square edge length
        # (presumably millimetres, given the /1000 conversions below).
        self.target_x_number = 10
        self.target_y_number = 7
        self.target_cell_size = 15

    def rotation2euler_angles(self, R):
        """Convert a 3x3 rotation matrix to XYZ Euler angles (radians).

        Inverse of ``angle2rotation`` (R = Rz @ Ry @ Rx convention).
        In the gimbal-lock case (|cos(pitch)| ~ 0) z is fixed to 0.
        """
        sy = math.hypot(R[0, 0], R[1, 0])
        if sy >= 1e-6:
            x = math.atan2(R[2, 1], R[2, 2])
            y = math.atan2(-R[2, 0], sy)
            z = math.atan2(R[1, 0], R[0, 0])
        else:  # singular: pitch near +/-90 degrees
            x = math.atan2(-R[1, 2], R[1, 1])
            y = math.atan2(-R[2, 0], sy)
            z = 0
        return np.array([x, y, z])

    def angle2rotation(self, x, y, z):
        """Build R = Rz @ Ry @ Rx from Euler angles given in radians."""
        cx, sx = math.cos(x), math.sin(x)
        cy, sy = math.cos(y), math.sin(y)
        cz, sz = math.cos(z), math.sin(z)
        Rx = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
        Ry = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
        Rz = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])
        return Rz @ Ry @ Rx

    def gripper2base(self, x, y, z, tx, ty, tz):
        """Robot pose (Euler angles in degrees, translation) -> (R, T).

        Returns the 3x3 rotation and the 3x1 translation column vector.
        """
        # The original composed a 4x4 matrix only to slice R and T back
        # out again; build the two parts directly instead.
        R_gripper2base = self.angle2rotation(
            math.radians(x), math.radians(y), math.radians(z))
        T_gripper2base = np.array([[tx], [ty], [tz]], dtype=np.float64)
        return R_gripper2base, T_gripper2base

    def target2camera(self, img):
        """Locate the chessboard in ``img`` and solve PnP.

        Returns (R, T) mapping board coordinates to camera coordinates.
        Raises ValueError when the board cannot be detected (previously
        a miss crashed with an opaque AttributeError on ``None``).
        """
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        pattern = (self.target_x_number, self.target_y_number)
        found, corners = cv2.findChessboardCorners(gray, pattern, None)
        if not found:
            raise ValueError("chessboard pattern not found in image")
        image_points = corners.reshape(-1, 2).astype(np.float64)
        # Board model on the z = 0 plane, corners enumerated to match the
        # detector's ordering; units follow ``target_cell_size``.
        object_points = np.zeros((pattern[0] * pattern[1], 3), dtype=np.float64)
        idx = 0
        for i in range(self.target_y_number):
            for j in range(self.target_x_number):
                object_points[idx, 0] = (self.target_x_number - j - 1) * self.target_cell_size
                object_points[idx, 1] = (self.target_y_number - i - 1) * self.target_cell_size
                idx += 1
        _, rvec, tvec = cv2.solvePnP(object_points, image_points, self.K,
                                     distCoeffs=self.distortion)
        R_target2camera = cv2.Rodrigues(rvec)[0]
        T_target2camera = tvec.reshape(3, 1)
        return R_target2camera, T_target2camera

    def process(self, img_path):
        """Run the full calibration pipeline.

        Collects board poses from the images under ``img_path`` and the
        robot poses from the fixed txt files, then calls
        ``cv2.calibrateHandEye``. Translations are converted mm -> m.
        """
        # Sort filenames so the image order is deterministic and matches
        # the fixed order of the pose file; os.walk yields files in
        # arbitrary, platform-dependent order.
        image_list = sorted(
            os.path.join(root, name)
            for root, _dirs, files in os.walk(img_path)
            for name in files
        )
        R_target2camera_list = []
        T_target2camera_list = []
        for image_name in image_list:
            img = cv2.imread(image_name)
            if img is None:
                # Skip unreadable / non-image files instead of crashing.
                continue
            R_target2camera, T_target2camera = self.target2camera(img)
            R_target2camera_list.append(R_target2camera)
            T_target2camera_list.append(T_target2camera / 1000)  # mm -> m

        # Camera-extrinsic rotation vectors exported from MATLAB.
        # NOTE(review): these two lists are read but never used below —
        # presumably for the "import extrinsics" mode; confirm intent.
        rota_list5_8 = []
        with open('VisionModule/matlabprogram/cameraRotate5_8.txt', 'r') as rota_file:
            for line in rota_file:
                rvec = np.array([float(v) for v in line.strip().split(',')])
                rota_list5_8.append(cv2.Rodrigues(rvec)[0])
        # Camera-extrinsic translation vectors, mm -> m.
        tran_list5_8 = []
        with open('VisionModule/matlabprogram/cameraTranslate5_8.txt', 'r') as tran_file:
            for line in tran_file:
                t = np.array([float(v) for v in line.strip().split(',')]) / 1000
                tran_list5_8.append(t.reshape(3, 1))

        # Robot poses: x, y, z in mm followed by rx, ry, rz in degrees.
        R_gripper2base_list = []
        T_gripper2base_list = []
        with open('VisionModule/matlabprogram/robotpose5_8.txt', 'r') as robotposefile:
            for line in robotposefile:
                vals = [float(v) for v in line.strip().split(',')]
                T_gripper2base_list.append((np.array(vals[0:3]) / 1000).reshape(3, 1))
                rx, ry, rz = (v * math.pi / 180 for v in vals[3:6])
                R_gripper2base_list.append(self.angle2rotation(rx, ry, rz))

        # NOTE(review): despite the variable names, for an eye-in-hand
        # setup calibrateHandEye returns the camera->gripper transform.
        R_camera2base, T_camera2base = cv2.calibrateHandEye(
            R_gripper2base_list, T_gripper2base_list,
            R_target2camera_list, T_target2camera_list)
        return (R_camera2base, T_camera2base, R_gripper2base_list,
                T_gripper2base_list, R_target2camera_list, T_target2camera_list)

    def check_result(self, R_cb, T_cb, R_gb, T_gb, R_tc, T_tc):
        """Sanity-check the calibration result.

        Maps the board origin into the robot base frame for every sample;
        a good calibration prints (nearly) the same point each time.
        """
        def _homogeneous(R, T):
            # Stack [R | T] over [0 0 0 1] into a 4x4 transform.
            # np.vstack replaces np.row_stack, removed in NumPy 2.0.
            return np.vstack((np.column_stack((R, T)), np.array([0, 0, 0, 1])))

        # Hoisted: this matrix is constant across samples.
        RT_camera_to_gripper = _homogeneous(R_cb, T_cb)
        print(self.rotation2euler_angles(R_cb) * 180 / math.pi)
        print(RT_camera_to_gripper)  # the hand-eye matrix
        origin = np.array([0, 0, 0, 1])
        for i in range(len(R_gb)):
            RT_gripper2base = _homogeneous(R_gb[i], T_gb[i])
            RT_target_to_camera = _homogeneous(R_tc[i], T_tc[i])
            # Board origin expressed in the base frame (homogeneous).
            RT_target_to_base = (RT_gripper2base @ RT_camera_to_gripper
                                 @ RT_target_to_camera @ origin)
            print("第{}次验证结果为:".format(i))
            print(RT_target_to_base)

if __name__ == "__main__":
    image_path = r"./VisionModule/CR5_5.8"
    calibrator = Calibration()
    R_cb, T_cb, R_gb, T_gb, R_tc, T_tc = calibrator.process(image_path)
    calibrator.check_result(R_cb, T_cb, R_gb, T_gb, R_tc, T_tc)
