
import cv2
from PyQt5.QtCore import Qt, QThread, pyqtSignal
import numpy as np
import os
import copy
import time
from kinematic import *
import math
from scipy.spatial.transform import Rotation
class Vision(QThread):
    change_pixmap_signal = pyqtSignal(np.ndarray)
    grasp_info_signal=pyqtSignal(str)
    track_info_signal=pyqtSignal(str)
    def __init__(self, camera_index=0):
        """Vision worker thread.

        Captures frames from a camera and runs the hand-eye-calibration,
        visual-grasping and target-tracking pipelines on them, publishing
        results through the class-level pyqtSignals.

        Args:
            camera_index (int): index handed to cv2.VideoCapture.
        """
        super().__init__()
        self.is_running=False  # main-loop flag; the owner sets it True before start()
        self.frame=None        # latest BGR frame read from the camera
        # Camera parameters
        self.camera_index = camera_index
        self.camera_height=480
        self.camera_width=640
        # Camera intrinsic matrix and distortion coefficients
        self.cam_mtx = np.array( [[670.92263044  ,      0.        ,   358.82135372],
                            [  0.           ,    673.8728403 ,   224.19994634],
                            [  0.           ,      0.        ,     1.        ]], dtype=np.float64)

        self.cam_dist = np.array([-0.19770613, -0.19343703 ,-0.00058339, -0.00063087 , 0.07889203], dtype=np.float64)
        
        # Hand-eye calibration state
        self.handeye_joints=[]
        self.RT_cam_to_end=None  # hand-eye calibration result (4x4 camera -> end-effector transform)
        self.eye_to_end_existed=False
        # Load a previously saved calibration if the file exists
        if os.path.isfile('calibration/handeye_clib/eye_to_end.npy'):
            self.RT_cam_to_end = np.load('calibration/handeye_clib/eye_to_end.npy')
            self.eye_to_end_existed=True
        
        # Visual grasping state
        # Defaults below are tuned for picking a yellow eraser
        self.is_grasping=False
        self.is_roiViewing=False
        # NOTE: OpenCV hue spans 0-179, so an upper H of 255 simply means "no hue limit"
        self.hsv_lower = np.array([0, 150, 100])
        self.hsv_upper = np.array([255, 255, 255])
        self.target_size=[32,12]  # physical target length and width (mm)
        self.place_pose=[200,200,50,0,-90]  # place pose, passed straight to inverse_kinematic
        self.waitJoint=[0,102,-48,-115,0]   # standby joint configuration (deg)
        self.pick_z=5  # z height used when picking (mm)
        self.RTs=[]     # buffered target->base transforms awaiting filtering
        self.angles=[]  # buffered long-edge angles, parallel to self.RTs
        self.uart=None   # reference only, injected by the owner
        self.teach=None  # reference only, injected by the owner
        
        # Target tracking state
        self.is_tracking=False
        self.is_roiViewing_2=False
        self.hsv_lower_2 = np.array([0, 150, 100])
        self.hsv_upper_2 = np.array([255, 255, 255])
        self.target_size_2=[32,12]
        self.hold_dis=170  # desired stand-off distance from the target (mm)
        self.lamda=0.5     # visual-servo convergence gain
        self.waitJoint_2=[0,102,-48,-108,0]  # standby joint configuration (deg)
        self.uart_2=None   # reference only, injected by the owner
        self.teach_2=None  # reference only, injected by the owner
        self.start_time=0  # per-frame processing start timestamp (set in run())
        self.end_time=0    # paired with start_time in track() to integrate joint rates
        
    def run(self):
        """Thread main loop: grab frames and dispatch the enabled stages.

        The owner must set ``self.is_running = True`` before ``start()``;
        :meth:`stop` clears the flag so the loop exits, after which the
        capture device is released. Each successfully read frame is pushed
        to the GUI through ``change_pixmap_signal``.
        """
        cap = cv2.VideoCapture(self.camera_index)
        # `while True and x` is just `while x` — simplified.
        while self.is_running:
            self.start_time = time.time()  # timestamp consumed by track() for rate integration
            ret, self.frame = cap.read()
            if not ret:
                continue  # dropped frame: retry without processing or emitting

            if self.is_grasping:
                self.grasp()
            if self.is_roiViewing:
                self.roiView()
            if self.is_roiViewing_2:
                self.roiView_2()
            if self.is_tracking:
                self.track()

            self.change_pixmap_signal.emit(self.frame)

        cap.release()

    def get_camera_list(self):
        """Probe camera indices 0-2 and return those that can be opened.

        Returns:
            list[str]: stringified indices of the cameras that opened.
        """
        camera_list = []
        for index in range(3):  # probe at most 3 devices; raise the range for more cameras
            try:
                cap = cv2.VideoCapture(index)
            except Exception:
                continue
            if cap.isOpened():
                camera_list.append(str(index))
            # Release unconditionally: the original leaked the handle when
            # the device existed but failed to open.
            cap.release()
        return camera_list
    def snap(self,joint_cur_paras):
        """Save the current frame for hand-eye calibration and record the
        joint angles it was taken at.

        Args:
            joint_cur_paras (list): current joint angles (deg).

        Returns:
            bool: True when the image was written successfully.
        """
        save_dir = os.path.join('calibration', 'handeye_clib', 'hand_eye')
        # cv2.imwrite fails silently when the directory is missing — create it.
        os.makedirs(save_dir, exist_ok=True)
        path = os.path.join(save_dir, str(len(self.handeye_joints)) + '.jpg')
        ret = cv2.imwrite(path, self.frame)
        if not ret:
            return False

        print('save image success')
        print('当前关节角：', joint_cur_paras)
        # Deep-copy so later mutation of the caller's list cannot corrupt the record.
        self.handeye_joints.append(copy.deepcopy(joint_cur_paras))
        print('当前关节角集合：', self.handeye_joints)
        return True

    def stop(self):
        """Request the capture loop in run() to terminate."""
        self.is_running = False
    
    def roiView(self):
        """Overwrite self.frame, keeping only pixels inside the grasp HSV window."""
        hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
        keep = cv2.inRange(hsv, self.hsv_lower, self.hsv_upper)
        self.frame = cv2.bitwise_and(self.frame, self.frame, mask=keep)

            
    def grasp(self):
        """One pass of the visual-grasp state machine (called once per frame).

        Buffers target poses from extract_roi(); once more than 60 samples
        are collected they are outlier-filtered, and if enough inliers
        remain the arm is driven through pick -> place -> return-to-standby
        using inverse kinematics. Progress is reported on grasp_info_signal.
        NOTE(review): arm motion runs on the capture thread, so no frames
        are processed while the arm is moving.
        """
        ret,RT_target_to_base,angle=self.extract_roi()
        if ret:
            self.RTs.append(RT_target_to_base)
            self.angles.append(angle)
        if len(self.RTs)>60:  # strictly greater: filtering actually starts at 61 samples
            print('')
            print('60组位姿集齐完毕，开始滤波...')
            self.grasp_info_signal.emit('60组位姿集齐完毕，开始滤波...')
            good,RT_avg,angle_avg=self.filter(np.array(self.RTs),np.array(self.angles))
            # Push the grasp point 10 mm further out along the base->target
            # direction (presumably compensates a systematic reach error — TODO confirm).
            offset_dis=10
            offset_angle=math.atan2(RT_avg[1,3],RT_avg[0,3])
            RT_avg[0,3]=RT_avg[0,3]+offset_dis*math.cos(offset_angle)
            RT_avg[1,3]=RT_avg[1,3]+offset_dis*math.sin(offset_angle)
            print('滤波完成，RT_avg:',RT_avg,'\nangle_avg:',angle_avg)
            self.grasp_info_signal.emit('有效位姿'+str(good)+'平均位置('+"{:.2f}".format(RT_avg[0,3])+','+"{:.2f}".format(RT_avg[1,3])+')平均角度'+"{:.2f}".format(angle_avg))
            self.RTs.clear()
            self.angles.clear()
            if good>30:
                print('好位姿大于30组，开始逆解...')
                self.grasp_info_signal.emit('好位姿大于30组，开始逆解...')
                
                # Solve IK for the filtered pick position at the fixed pick height.
                ret,joint_angles,best_alpha=inverse_kinematic(RT_avg[0,3],RT_avg[1,3],self.pick_z,0,-90)
                if ret:
                    
                    # Two candidate wrist rolls (the target's long edge is 180-deg symmetric).
                    roll1=-(angle_avg-joint_angles[0][0])
                    roll2=-(angle_avg+180-joint_angles[0][0])
                    # Wrap both candidates into [-90, 90].
                    if roll1>90:
                        roll1=roll1-180
                    if roll1<-90:
                        roll1=180+roll1
                        
                    if roll2>90:
                        roll2=roll2-180
                    if roll2<-90:
                        roll2=180+roll2
                        
                    # NOTE(review): after the wrapping above, roll1 always lies in
                    # [-90, 90], so this condition always selects roll1 — confirm intent.
                    if roll1>=-135 and roll1<=135:
                        joint_angles[0][4]=roll1
                    else:
                        joint_angles[0][4]=roll2
                    print('逆解成功，调整后的角度：',joint_angles[0])
                    self.grasp_info_signal.emit('逆解成功! 开始夹取...')
                    
                    # Each waypoint is [5 joint angles] + gripper PWM
                    # (1100 ~ open, 1800 ~ closed — presumably; confirm with hardware).
                    points=[self.waitJoint+[1100],joint_angles[0]+[1800]]
                    print('开始夹取...')
                    ret=self.teach.joint_point_teach(self.uart,points)
                    if ret>0:
                        print('夹取成功！开始放置...')
                        self.grasp_info_signal.emit('夹取成功！开始放置...')
                        
                        cur_joint_angles=joint_angles[0]  # remember the pick-position joints
                        ret,joint_angles,best_alpha=inverse_kinematic(self.place_pose[0],self.place_pose[1],self.place_pose[2],self.place_pose[3],self.place_pose[4])
                        if ret:
                            
                            # Hard-coded via-point near the standby pose, gripper kept closed en route.
                            points=[cur_joint_angles+[1800],[0,102,-48,-108,0,1800],joint_angles[0]+[1100]]
                            ret=self.teach.joint_point_teach(self.uart,points)
                            if ret>0:
                                print('放置成功！开始回待命位置...')
                                self.grasp_info_signal.emit('放置成功！开始回待命位置...')
                                
                                cur_joint_angles=joint_angles[0]
                                points=[cur_joint_angles+[1100],self.waitJoint+[1100]]
                                ret=self.teach.joint_point_teach(self.uart,points)
                                if ret>0:
                                    print('回待命位置成功！开始下一次检测...')
                                    self.grasp_info_signal.emit('回待命位置成功！开始下一次检测...')
                                    
                                    time.sleep(0.5)  # let the arm settle before the next detection frame
                            else:
                                print('点示教失败！')
                                self.grasp_info_signal.emit('点示教失败！')
                        else:
                            print('放置位置无法逆解！')
                            self.grasp_info_signal.emit('放置位置无法逆解！')
                            
                    else:
                        print('轨迹规划失败，夹取失败！')
                        self.grasp_info_signal.emit('轨迹规划失败，夹取失败！')
                    # Whatever happened above, return to standby and open the gripper
                    # before the next frame; each command is sent twice as a
                    # best-effort retry over the serial link (presumably — confirm protocol).
                    self.uart.send_command(self.waitJoint,'1000')
                    time.sleep(1)
                    self.uart.send_command(self.waitJoint,'1000')
                    time.sleep(0.01)
                    self.uart.set_grapper_pwm(1100)
                    time.sleep(0.5)
                    self.uart.set_grapper_pwm(1100)
                else:
                    print('逆解失败!')
                    self.grasp_info_signal.emit('逆解失败!')
                    
            else:
                print('好位姿少于30组，继续检测...') 
                self.grasp_info_signal.emit('好位姿少于30组，继续检测...') 
                

    # Check whether a contour is a rectangle and extract its ordered corner points.
    def get_rectangle_infor(self,contour,target_size):
        """Detect a rectangle in ``contour`` and build the ordered image points.

        Args:
            contour: a single OpenCV contour (as returned by cv2.findContours).
            target_size (tuple): physical (length, width) of the target; only its
                aspect ratio is used here, to gate candidate rectangles.

        Returns:
            tuple: (flag, box_pts, rect) where flag is 1 on success, box_pts is a
            (5, 2) float32 array laid out [corner, corner, centre, corner, corner],
            and rect is the raw cv2.minAreaRect result. On rejection: (0, 0, 0).
        """
        rect_flag = 0  # becomes 1 only when every rectangle check passes

        # Convex hull smooths out contour noise before fitting the rectangle.
        hull_contour = cv2.convexHull(contour)
        contours = hull_contour

        rect = cv2.minAreaRect(contours)  # minimum-area rect: (centre(x, y), (w, h), rotation)
        
        # --- contour constraint checks ---
        area = int(rect[1][0]*rect[1][1])  # bounding-rect area in pixels
        if area < 1200 or area>(320*240):  # reject shapes that are too small or too large
            rect_flag = 0
            return rect_flag, 0, 0
        long=0
        short=0
        if rect[1][0]>rect[1][1]:
            long=rect[1][0]
            short=rect[1][1]
        else:
            long=rect[1][1]
            short=rect[1][0]
        ratio=target_size[0]/target_size[1]  # expected length/width ratio
        # Reject candidates whose aspect ratio is more than 30% off the target's.
        if long/short>ratio*1.3 or long/short<ratio*0.7:
            rect_flag = 0
            return rect_flag, 0, 0

        # --- build the ordered image points ---
        box = cv2.boxPoints(rect)  # the 4 corner coordinates of the min-area rect
        box_temp = np.zeros((5,2), np.float32)
        box_temp[2] = rect[0]  # rect centre is stored in slot 2
        
        # Pixel offsets of each corner from the centre (image y flipped so +y is up).
        delta_x=[]
        delta_y=[]
        for i in range(4):
            delta_x.append(box[i][0]-box_temp[2][0])
            delta_y.append(box_temp[2][1]-box[i][1])
        # Polar angle (deg) of each corner about the centre, paired with its box index.
        angles=[]
        for i in range(4):
            angles.append([math.atan2(delta_y[i],delta_x[i])*180/math.pi,i])
        # Sort corners by ascending angle.
        angles.sort(key=lambda x:x[0])
        # Map the angle-sorted corners onto a fixed output ordering.
        # NOTE(review): the first branch special-cases an axis-aligned rect whose
        # corners land exactly at 0 and 180 degrees; the 1e-6 exact-angle
        # comparisons look fragile — confirm the intended corner convention.
        index=[-1,-1,-1,-1]
        if abs(angles[1][0]-0)<1e-6 and abs(angles[3][0]-180)<1e-6 and angles[2][0]<90:
            index[0]=angles[2][1]
            index[1]=angles[1][1]
            index[2]=angles[3][1]
            index[3]=angles[0][1]
        else:
            index[0]=angles[3][1]
            index[1]=angles[2][1]
            index[2]=angles[0][1]
            index[3]=angles[1][1]
        
        # NOTE(review): both branches above assign all four slots, so this
        # all(-1) guard can never trigger.
        if all(index[i]==-1 for i in range(4)):
            rect_flag = 0
        else:
            box_temp[0]=box[index[0]]
            box_temp[1]=box[index[1]]
            box_temp[3]=box[index[2]]
            box_temp[4]=box[index[3]]
            rect_flag = 1
        return rect_flag, box_temp, rect

    # 计算世界坐标的排列顺序
    def get_target_world_pose(self,target_img_pose, target_size):
        """Build the 5 object-frame points matching the ordered image points.

        The physical long side is assigned to whichever image edge measures
        longer, so world and image point orderings correspond.
        Note: with this construction the world frame's z axis points downward.

        Args:
            target_img_pose: (5, 2) ordered image points (corners + centre).
            target_size: physical (length, width) of the target.

        Returns:
            (5, 3) float32 array of object-frame coordinates.
        """
        edge_a = np.linalg.norm(target_img_pose[1] - target_img_pose[0])
        edge_b = np.linalg.norm(target_img_pose[3] - target_img_pose[0])
        if edge_a >= edge_b:
            world_x, world_y = max(target_size), min(target_size)
        else:
            world_x, world_y = min(target_size), max(target_size)

        half_x, half_y = world_x / 2, world_y / 2
        pose = np.zeros((5, 3), np.float32)
        pose[0] = [-half_x, -half_y, 0]
        pose[1] = [ half_x, -half_y, 0]
        pose[2] = [0, 0, 0]
        pose[3] = [-half_x,  half_y, 0]
        pose[4] = [ half_x,  half_y, 0]
        return pose

    def get_target_base_pose(self,target_img_pose,target_world_pose, target_size,RT_end_to_base):
        """Solve PnP and chain transforms to express the target in the base frame.

        ``target_size`` is retained for interface compatibility but unused here.

        Returns:
            (bool, ndarray|None): success flag and 4x4 target->base transform.
        """
        ok, rvec, tvec = cv2.solvePnP(target_world_pose, target_img_pose, self.cam_mtx, self.cam_dist)
        if not ok:
            return False, None
        rot, _ = cv2.Rodrigues(rvec)
        RT_target_to_cam = np.vstack((np.column_stack((rot, tvec)), [0.0, 0.0, 0.0, 1.0]))
        # base <- end-effector <- camera <- target
        RT_target_to_base = RT_end_to_base @ self.RT_cam_to_end @ RT_target_to_cam
        return True, RT_target_to_base
        

    # 回调函数，用于提取符合 HSV 范围的区域并显示结果
    def extract_roi(self):
        """Detect the HSV-colored rectangular target in self.frame and
        compute its pose in the robot base frame.

        Annotates self.frame with the detected centre and edge axes.

        Returns:
            tuple: (True, 4x4 target->base transform, long-edge angle in
            degrees) on success, otherwise (False, None, None).
        """
        # Threshold in HSV space with the grasp colour window.
        hsv_image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv_image, self.hsv_lower, self.hsv_upper)

        (cnts, _) = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        target_img_pose = []
        target_world_pose = []
        detect_flag = 0  # whether a valid rectangle was found
        for contour in cnts:
            retval, target_img_pose, shape_infor = self.get_rectangle_infor(contour, self.target_size)
            if retval == 1:
                detect_flag = 1
                target_world_pose = self.get_target_world_pose(target_img_pose, self.target_size)
                break  # only one target per frame

        if not detect_flag:
            return False, None, None

        # Removed dead `np.int0(cv2.boxPoints(...))` — its result was never used
        # and np.int0 no longer exists in NumPy 2.0.
        # Visualize: long-edge axis (green), short-edge axis (blue), centre (red).
        cv2.arrowedLine(self.frame,(int(target_img_pose[2][0]), int(target_img_pose[2][1])),(int((target_img_pose[1][0]+target_img_pose[4][0])/2), int((target_img_pose[1][1]+target_img_pose[4][1])/2)),color=(0,255,0))
        cv2.arrowedLine(self.frame,(int(target_img_pose[2][0]), int(target_img_pose[2][1])),(int((target_img_pose[3][0]+target_img_pose[4][0])/2), int((target_img_pose[3][1]+target_img_pose[4][1])/2)),color=(255,0,0))
        cv2.circle(self.frame,(int(target_img_pose[2][0]), int(target_img_pose[2][1])), 3, (0, 0, 255),-1)

        # Camera pose is taken at the standby joint configuration.
        RT_end_to_base = forward_kinematic(np.deg2rad(self.waitJoint))
        retval, RT_target_to_base = self.get_target_base_pose(target_img_pose, target_world_pose, target_size=self.target_size, RT_end_to_base=RT_end_to_base)
        if not retval:
            return False, None, None

        RT_target_to_base = np.array(RT_target_to_base)
        cv2.putText(self.frame, "targetTobase:"+"{:.2f}".format(RT_target_to_base[0,3])+','+"{:.2f}".format(RT_target_to_base[1,3])+','+"{:.2f}".format(RT_target_to_base[2,3]), (10, 100), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 1)
        # Long-edge heading: pick the base-frame axis parallel to the long edge.
        if np.linalg.norm(target_img_pose[1] - target_img_pose[0]) > np.linalg.norm(target_img_pose[3] - target_img_pose[0]):
            # World x axis is parallel to the long edge.
            angle = math.atan2(RT_target_to_base[1,0], RT_target_to_base[0,0])*180/math.pi
        else:
            angle = math.atan2(RT_target_to_base[1,1], RT_target_to_base[0,1])*180/math.pi
        cv2.putText(self.frame, "angle:"+"{:.2f}".format(angle), (10, 150), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 1)

        return True, RT_target_to_base, angle

    def filter(self,RTs,angles):
        """Reject outlier poses by 2-sigma gating on x/y, then average.

        Args:
            RTs (ndarray): (N, 4, 4) stack of target->base transforms.
            angles (ndarray): (N,) long-edge angles in degrees, parallel to RTs.

        Returns:
            tuple: (good, RT_mean, angle_mean) where ``good`` is the inlier
            count. NOTE: when no pose survives the gate the means are NaN
            (np.mean of an empty array) — callers must check ``good`` first.
        """
        # Translation components of every pose.
        x_values = RTs[:, 0, 3]
        y_values = RTs[:, 1, 3]

        x_mean = np.mean(x_values)
        y_mean = np.mean(y_values)
        x_std = np.std(x_values)
        y_std = np.std(y_values)

        # 2-sigma gate on each axis; compute the inlier mask once instead of
        # evaluating the identical boolean expression twice.
        keep = (np.abs(x_values - x_mean) < 2 * x_std) & (np.abs(y_values - y_mean) < 2 * y_std)
        filtered_RTs = RTs[keep]
        filtered_angle = angles[keep]

        print("原始位姿数量:", len(RTs))
        print("有效位姿数量:", len(filtered_RTs))
        good = len(filtered_RTs)

        # Average the inliers element-wise.
        filtered_RTs_mean = np.mean(filtered_RTs, axis=0)
        filtered_angle_mean = np.mean(filtered_angle, axis=0)
        return good, filtered_RTs_mean, filtered_angle_mean
    
    #下面是目标跟踪系列函数
    
    def roiView_2(self):
        """Overwrite self.frame, keeping only pixels inside the tracking HSV window."""
        hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
        keep = cv2.inRange(hsv, self.hsv_lower_2, self.hsv_upper_2)
        self.frame = cv2.bitwise_and(self.frame, self.frame, mask=keep)
        
    def get_RT_target_to_cam(self,target_img_pose,target_world_pose):
        """Solve PnP and return the target's 4x4 pose in the camera frame.

        Returns:
            (bool, ndarray|None): success flag and target->camera transform.
        """
        ok, rvec, tvec = cv2.solvePnP(target_world_pose, target_img_pose, self.cam_mtx, self.cam_dist)
        if not ok:
            return False, None
        rot, _ = cv2.Rodrigues(rvec)
        RT_target_to_cam = np.vstack((np.column_stack((rot, tvec)), [0.0, 0.0, 0.0, 1.0]))
        return True, RT_target_to_cam

    def get_RT_from_chessboard(self,img, chess_board_x_num, chess_board_y_num, chess_board_len, cam_mtx, cam_dist):
        '''
        Estimate the camera extrinsics from a single chessboard image.

        :param img: BGR image containing the chessboard (annotated in place on success)
        :param chess_board_x_num: number of inner corners along x
        :param chess_board_y_num: number of inner corners along y
        :param chess_board_len: chessboard square size, mm
        :param cam_mtx: camera intrinsic matrix
        :param cam_dist: camera distortion coefficients
        :return: (True, 4x4 board->camera transform) when corners are found, else (False, None)
        '''

        
        # termination criteria for the sub-pixel corner refinement
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        # Chessboard corner coordinates in the board frame, centred on the board.
        objp = np.zeros((chess_board_y_num*chess_board_x_num,3), np.float32)
        for m in range(chess_board_y_num):
            for n in range(chess_board_x_num):
                objp[m*chess_board_x_num + n] = [n*chess_board_len-chess_board_len*int(chess_board_x_num/2), m*chess_board_len-chess_board_len*int(chess_board_y_num/2), 0]
        # Shift the origin.
        # NOTE(review): the loop above already subtracted the half-board offset;
        # this subtracts the same offset a second time, putting the origin a full
        # half-board beyond the centre — confirm the double offset is intentional.
        objp=np.array([[x-chess_board_len*int(chess_board_x_num/2),y-chess_board_len*int(chess_board_y_num/2),0] for x,y,z in objp])
        # Arrays to store object points and image points from all the images.
        objpoints = [] # 3d point in real world space
        imgpoints = [] # 2d points in image plane.

        
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (chess_board_x_num, chess_board_y_num), None)

        # If found, add object points, image points (after refining them)
        if ret == True:
            objpoints.append(objp)
            corners2 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
            imgpoints.append(corners2)
            draw_p=cv2.drawChessboardCorners(img, (chess_board_x_num, chess_board_y_num), corners, ret)
        
            # NOTE(review): retval from solvePnP is not checked here.
            retval, rvec, tvec = cv2.solvePnP(objpoints[0], imgpoints[0], cam_mtx, cam_dist)
            # Reprojection error (computed as a diagnostic; currently unused).
            total_error = 0
            imgpoints2, _ = cv2.projectPoints(objpoints[0], rvec, tvec, cam_mtx, cam_dist)
            error = cv2.norm(imgpoints[0], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
            total_error += error

            # Assemble the homogeneous board->camera transform.
            RT = np.column_stack(((cv2.Rodrigues(rvec))[0],tvec))
            RT = np.row_stack((RT, np.array([0, 0, 0, 1])))
            cv2.putText(draw_p, "chessTocam:"+"{:.2f}".format(RT[0,3])+','+"{:.2f}".format(RT[1,3])+','+"{:.2f}".format(RT[2,3]), (10, 75), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
            

            return True,RT
        return False,None

    def get_RT_from_target(self,frame,target_size):
        """Detect the tracking target in ``frame`` and return its camera-frame pose.

        Annotates ``frame`` with the detected box corners, axes and pose text.

        Args:
            frame (ndarray): BGR image, drawn on in place.
            target_size (sequence): physical (length, width) of the target, mm.

        Returns:
            (bool, ndarray|None): success flag and 4x4 target->camera transform.
        """
        # Threshold in HSV space with the tracking colour window.
        hsv_image = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv_image, self.hsv_lower_2, self.hsv_upper_2)

        (cnts, _) = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        target_img_pose = []
        target_world_pose = []
        shape_infor = []
        detect_flag = 0  # whether a valid rectangle was found
        for contour in cnts:
            retval, target_img_pose, shape_infor = self.get_rectangle_infor(contour, target_size)
            if retval == 1:
                detect_flag = 1
                target_world_pose = self.get_target_world_pose(target_img_pose, target_size)
                break  # only one target per frame

        if not detect_flag:
            return False, None

        # Corner markers (np.int0 was removed in NumPy 2.0; astype(np.intp) is the
        # equivalent cast, and cv2.circle wants plain int tuples).
        box = cv2.boxPoints(shape_infor).astype(np.intp)
        for point in box:
            cv2.circle(frame, (int(point[0]), int(point[1])), 3, (255, 0, 0), -1)
        # Long-edge axis (green), short-edge axis (blue), centre (red), corner labels.
        cv2.arrowedLine(frame,(int(target_img_pose[2][0]), int(target_img_pose[2][1])),(int((target_img_pose[1][0]+target_img_pose[4][0])/2), int((target_img_pose[1][1]+target_img_pose[4][1])/2)),color=(0,255,0))
        cv2.arrowedLine(frame,(int(target_img_pose[2][0]), int(target_img_pose[2][1])),(int((target_img_pose[3][0]+target_img_pose[4][0])/2), int((target_img_pose[3][1]+target_img_pose[4][1])/2)),color=(255,0,0))
        cv2.circle(frame,(int(target_img_pose[2][0]), int(target_img_pose[2][1])), 3, (0, 0, 255),-1)
        cv2.putText(frame, "1", (int(target_img_pose[0][0]), int(target_img_pose[0][1])), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
        cv2.putText(frame, "2", (int(target_img_pose[1][0]), int(target_img_pose[1][1])), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
        cv2.putText(frame, "3", (int(target_img_pose[3][0]), int(target_img_pose[3][1])), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
        cv2.putText(frame, "4", (int(target_img_pose[4][0]), int(target_img_pose[4][1])), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)

        retval, RT_target_to_cam = self.get_RT_target_to_cam(target_img_pose, target_world_pose)
        if not retval:
            return False, None
        cv2.putText(frame, "targetTocam:"+"{:.2f}".format(RT_target_to_cam[0,3])+','+"{:.2f}".format(RT_target_to_cam[1,3])+','+"{:.2f}".format(RT_target_to_cam[2,3]), (10, 75), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
        return True, RT_target_to_cam

    def track(self):
        """One visual-servoing step (called once per frame from run()).

        [1] Estimate the current camera pose relative to the desired
        ("ideal") camera pose that holds the target straight ahead at
        ``self.hold_dis`` mm.
        [2] Turn that pose error into an end-effector twist with gain
        ``self.lamda``, map it through the arm Jacobian to joint rates,
        integrate over the frame-processing time, and send the updated
        joint angles over UART after joint-limit and workspace checks.

        Does nothing when the target is not detected in the current frame.
        """
        # [1] target pose in the current camera frame
        ret,RT_target_to_cam=self.get_RT_from_target(self.frame,target_size=self.target_size_2)
        if ret:
            RT_cam_to_target=np.linalg.inv(RT_target_to_cam)
            # Desired pose: target straight ahead of the camera at hold_dis mm.
            RT_target_to_icam=np.array([[1,0,0,0],
                            [0,1,0,0],
                            [0,0,1,self.hold_dis],
                            [0,0,0,1]])
            # Current camera expressed in the desired ("ideal") camera frame.
            RT_cam_to_icam=RT_target_to_icam@RT_cam_to_target
            
            # [2] build the control twist V_end_to_end
            # rotation part
            R = RT_cam_to_icam[:3, :3]
            # translation part
            t = RT_cam_to_icam[:3, 3]
            # rotation matrix -> rotation vector (axis-angle)
            r = Rotation.from_matrix(R)
            rot_vector = r.as_rotvec()
            
            lamda=self.lamda  # convergence gain
            # Proportional control law driving the pose error to zero.
            v_cam_to_cam=-lamda*R.T@t
            v_cam_to_cam=v_cam_to_cam.reshape(3,1)  # as a column vector
            w_cam_to_cam=-lamda*rot_vector  # note: radians per second
            w_cam_to_cam=w_cam_to_cam.reshape(3,1)
            # Rotate the camera twist into the end-effector frame using the
            # hand-eye rotation.
            v_end_to_end=self.RT_cam_to_end[0:3,0:3]@v_cam_to_cam
            w_end_to_end=self.RT_cam_to_end[0:3,0:3]@w_cam_to_cam
            V_end_to_end=np.concatenate((v_end_to_end, w_end_to_end), axis=0)  # stack into a 6x1 twist
            
        
            # Current Jacobian (6x5) and its pseudo-inverse.
            J=jacobian(np.deg2rad(self.teach_2.joint_cur_paras))
            J_pinv=np.linalg.pinv(J)
            # Joint rates
            q_dot=J_pinv@V_end_to_end  # radians per second
            q_dot=q_dot*180/np.pi  # convert to degrees per second
            cv2.putText(self.frame, "q1dot:"+"{:.2f}".format(q_dot[0][0]), (10, 100), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
            cv2.putText(self.frame, "q2dot:"+"{:.2f}".format(q_dot[1][0]), (10, 125), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
            cv2.putText(self.frame, "q3dot:"+"{:.2f}".format(q_dot[2][0]), (10, 150), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
            cv2.putText(self.frame, "q4dot:"+"{:.2f}".format(q_dot[3][0]), (10, 175), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
            cv2.putText(self.frame, "q5dot:"+"{:.2f}".format(q_dot[4][0]), (10, 200), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
            # Rate sanity check.
            # NOTE(review): compares signed values, so large *negative* rates
            # slip through — probably should use abs(dot[0]); confirm.
            if all(dot[0]<=2000 for dot in q_dot):
                # Integrate rates over the actual frame-processing time
                # (start_time was stamped in run()).
                self.end_time = time.time()
                delta_t = self.end_time - self.start_time
                # Remember the previous joints so a failed check can roll back.
                self.teach_2.joint_old_paras=self.teach_2.joint_cur_paras
                delta=q_dot.reshape(5,)*delta_t
                self.teach_2.joint_cur_paras=(self.teach_2.joint_cur_paras+delta).tolist()  # remember to convert back to a list
                cv2.putText(self.frame, "q1:"+"{:.2f}".format(self.teach_2.joint_cur_paras[0]), (10, 225), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
                cv2.putText(self.frame, "q2:"+"{:.2f}".format(self.teach_2.joint_cur_paras[1]), (10, 250), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
                cv2.putText(self.frame, "q3:"+"{:.2f}".format(self.teach_2.joint_cur_paras[2]), (10, 275), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
                cv2.putText(self.frame, "q4:"+"{:.2f}".format(self.teach_2.joint_cur_paras[3]), (10, 300), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
                cv2.putText(self.frame, "q5:"+"{:.2f}".format(self.teach_2.joint_cur_paras[4]), (10, 325), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
                
                cv2.putText(self.frame, "d1:"+"{:.3f}".format(delta[0]), (10, 350), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
                cv2.putText(self.frame, "d2:"+"{:.3f}".format(delta[1]), (10, 375), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
                cv2.putText(self.frame, "d3:"+"{:.3f}".format(delta[2]), (10, 400), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
                cv2.putText(self.frame, "d4:"+"{:.3f}".format(delta[3]), (10, 425), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
                cv2.putText(self.frame, "d5:"+"{:.3f}".format(delta[4]), (10, 450), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
                

                TF=forward_kinematic(np.deg2rad(self.teach_2.joint_cur_paras))
                
                if self.teach_2.check_angles(self.teach_2.joint_cur_paras)==True :
                    # Crude workspace guard on the forward-kinematics position.
                    if TF[0,3]>-10 and TF[2,3]>10:
                
                        self.uart_2.send_command(self.teach_2.joint_cur_paras,'0050')
                    
                        pass
                    else:
                        print('超出工作空间')
                        self.track_info_signal.emit('>>>超出工作空间')
                        cv2.putText(self.frame, 'out of reach', (10, 50), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
                        
                        # Roll back the integration step.
                        self.teach_2.joint_cur_paras=self.teach_2.joint_old_paras
                else:
                    print('关节角超范围')
                    self.track_info_signal.emit('>>>关节角超范围')
                    
                    cv2.putText(self.frame, 'out of range', (10, 50), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
                    
                    # Roll back the integration step.
                    self.teach_2.joint_cur_paras=self.teach_2.joint_old_paras
                