import cv2
import numpy as np
import copy
import math
import sys
sys.path.append('././')
from kinematic import *
from UART import *
from teach import *
import time
# Global HSV threshold range, tuned interactively via the trackbars below.
# NOTE(review): OpenCV hue spans 0-179, so an upper hue of 255 simply means
# "no upper hue limit".
hsv_lower = np.array([0, 150, 100])
hsv_upper = np.array([255, 255, 255])
# Hand-eye calibration result; per the file name this is the eye-to-end
# transform (camera frame -> end-effector frame) — TODO confirm direction.
RT_cam_to_end = np.load('calibration/handeye_clib/eye_to_end.npy')
# Camera intrinsic matrix and distortion coefficients.
cam_mtx = np.array( [[670.92263044  ,      0.        ,   358.82135372],
                    [  0.           ,    673.8728403 ,   224.19994634],
                    [  0.           ,      0.        ,     1.        ]], dtype=np.float64)

cam_dist = np.array([-0.19770613, -0.19343703 ,-0.00058339, -0.00063087 , 0.07889203], dtype=np.float64)

# Trackbar callbacks: each one writes a single channel bound of the global
# HSV threshold range.
def on_trackbar_hue_min(value):
    """Slider callback: set the lower hue bound of the HSV filter."""
    hsv_lower[0] = value

def on_trackbar_hue_max(value):
    """Slider callback: set the upper hue bound of the HSV filter."""
    hsv_upper[0] = value

def on_trackbar_saturation_min(value):
    """Slider callback: set the lower saturation bound of the HSV filter."""
    hsv_lower[1] = value

def on_trackbar_saturation_max(value):
    """Slider callback: set the upper saturation bound of the HSV filter."""
    hsv_upper[1] = value

def on_trackbar_value_min(value):
    """Slider callback: set the lower value (brightness) bound of the HSV filter."""
    hsv_lower[2] = value

def on_trackbar_value_max(value):
    """Slider callback: set the upper value (brightness) bound of the HSV filter."""
    hsv_upper[2] = value

# Test whether a contour is (approximately) a rectangle; if so, build the
# ordered image points used later by solvePnP.
def get_rectangle_infor(contour,target_size):
    """Detect a rectangle in a contour and return the target's image points.

    Args:
        contour: one contour as returned by cv2.findContours.
        target_size (tuple): (length, width) of the physical target; only its
            length/width ratio is used here, to reject wrong-shaped contours.

    Returns:
        (rect_flag, box_temp, rect):
            rect_flag (int): 1 when a plausible rectangle was found, else 0.
            box_temp (ndarray (5,2)): image points — 4 corners in slots
                0, 1, 3, 4 and the rectangle center in slot 2. Only valid when
                rect_flag == 1 (0 is returned in its place otherwise).
            rect: the cv2.minAreaRect result (center, (w,h), angle), or 0 on
                rejection.
    """
    rect_flag = 0 # becomes 1 only if the contour passes every check below

    # Convex hull smooths concavities before fitting the bounding rectangle.
    hull_contour = cv2.convexHull(contour)
    contours = hull_contour

    rect = cv2.minAreaRect(contours)  # min-area bounding rect: (center(x,y), (w,h), rotation angle)
    
    # ---- contour plausibility checks ----
    area = int(rect[1][0]*rect[1][1])  # area of the bounding rectangle, px^2
    if area < 1200 or area>(320*240):  # reject shapes that are too small or too large
        rect_flag = 0
        return rect_flag, 0, 0
    long=0
    short=0
    if rect[1][0]>rect[1][1]:
        long=rect[1][0]
        short=rect[1][1]
    else:
        long=rect[1][1]
        short=rect[1][0]
    ratio=target_size[0]/target_size[1]# expected length/width ratio of the target
    if long/short>ratio*1.3 or long/short<ratio*0.7:
        # Aspect ratio must lie within +/-30% of the expected ratio.
        rect_flag = 0
        return rect_flag, 0, 0

    # ---- build the ordered image-point array ----
    box = cv2.boxPoints(rect)  # the 4 corner points of the min-area rectangle
    # box_temp = np.copy(box)
    box_temp = np.zeros((5,2), np.float32)
    box_temp[2] = rect[0] # rectangle center goes in slot 2
    
    # Pixel offsets of each corner from the center. The image y axis is
    # flipped (center_y - corner_y) so angles follow math convention.
    delta_x=[]
    delta_y=[]
    for i in range(4):
        delta_x.append(box[i][0]-box_temp[2][0])
        delta_y.append(box_temp[2][1]-box[i][1])
    # Angle of each corner around the center, in degrees.
    angles=[]# each entry is [angle_deg, index of that corner in box]
    for i in range(4):
        angles.append([math.atan2(delta_y[i],delta_x[i])*180/math.pi,i])
    # Sort corners by ascending angle.
    angles.sort(key=lambda x:x[0])
    # Map the angle-sorted corners onto the fixed slot order consumed
    # downstream. NOTE(review): exact corner semantics of index[0..3] are
    # inferred from the angle ordering — confirm against the "1".."4" labels
    # drawn by the caller.
    index=[-1,-1,-1,-1]
    if abs(angles[1][0]-0)<1e-6 and abs(angles[3][0]-180)<1e-6 and angles[2][0]<90:
        # Axis-aligned special case: corners sit exactly on 0/90/180/-90 deg.
        index[0]=angles[2][1]
        index[1]=angles[1][1]
        index[2]=angles[3][1]
        index[3]=angles[0][1]
    else:
        index[0]=angles[3][1]
        index[1]=angles[2][1]
        index[2]=angles[0][1]
        index[3]=angles[1][1]
    
    if all(index[i]==-1 for i in range(4)):
        rect_flag = 0
    else:
        # Corners fill slots 0, 1, 3, 4; slot 2 already holds the center.
        box_temp[0]=box[index[0]]
        box_temp[1]=box[index[1]]
        box_temp[3]=box[index[2]]
        box_temp[4]=box[index[3]]
        rect_flag = 1
    return rect_flag, box_temp, rect

# Build the object-frame 3D coordinates matching the ordered image points.
def get_target_world_pose(target_img_pose, target_size):
    """Return the (5,3) float32 object-space points paired with target_img_pose.

    Whichever image edge (0->1 vs 0->3) is longer decides whether the
    object-frame x axis carries the target's long dimension or its short one,
    keeping image/world point orderings consistent for solvePnP.
    Note: with this construction the world z axis points downward.
    """
    edge_01 = np.linalg.norm(target_img_pose[1] - target_img_pose[0])  # pixel length of edge 0->1
    edge_03 = np.linalg.norm(target_img_pose[3] - target_img_pose[0])  # pixel length of edge 0->3

    if edge_01 >= edge_03:
        world_x, world_y = max(target_size), min(target_size)
    else:
        world_x, world_y = min(target_size), max(target_size)

    hx = world_x / 2
    hy = world_y / 2
    # Corners in slots 0,1,3,4 around the origin; center in slot 2.
    target_word_pose = np.array(
        [[-hx, -hy, 0],
         [ hx, -hy, 0],
         [  0,   0, 0],
         [-hx,  hy, 0],
         [ hx,  hy, 0]], dtype=np.float32)
    return target_word_pose

def get_target_base_pose(target_img_pose,target_world_pose, target_size,RT_end_to_base):
    """Solve PnP and chain transforms to express the target pose in the base frame.

    Args:
        target_img_pose: (5,2) image points from get_rectangle_infor.
        target_world_pose: (5,3) matching object points from get_target_world_pose.
        target_size: unused here; kept for interface compatibility.
        RT_end_to_base: 4x4 end-effector-to-base transform.

    Returns:
        (True, RT_target_to_base) on success, (False, None) if solvePnP fails.
    """
    ok, rvec, tvec = cv2.solvePnP(target_world_pose, target_img_pose, cam_mtx, cam_dist)
    if not ok:
        return False, None
    rot_mat = cv2.Rodrigues(rvec)[0]
    # Homogeneous 4x4 target->camera transform from (R | t).
    RT_target_to_cam = np.vstack((np.hstack((rot_mat, tvec)), [[0.0, 0.0, 0.0, 1.0]]))
    # base <- end <- camera <- target
    return True, RT_end_to_base @ RT_cam_to_end @ RT_target_to_cam
    

# Extract pixels inside the tuned HSV range, find the rectangular target in
# the resulting mask, and estimate the target's pose in the robot base frame.
def extract_roi(frame,target_size):
    """Run one detection pass on a single camera frame.

    Args:
        frame: BGR camera image.
        target_size (tuple): (length, width) of the physical target.

    Returns:
        (True, RT_target_to_base, angle) on success, where RT_target_to_base
        is the target's 4x4 pose in the base frame and angle (degrees) is the
        in-plane orientation of the target's long edge; (False, None, None)
        when no target was found or PnP failed.
    """
    # Threshold in HSV using the globally tuned bounds.
    hsv_image = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_image, hsv_lower, hsv_upper)
    result = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.waitKey(1)
    # Show the intermediate images for interactive HSV tuning.
    cv2.imshow("mask",mask)
    cv2.imshow("Extracted ROI", result)

    temp=copy.deepcopy(frame)  # drawing canvas; keeps the original frame intact

    (cnts, _) = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Take the first contour that passes the rectangle checks — only one
    # target is handled per frame.
    detect_flag = 0
    target_img_pose = []
    target_world_pose = []
    shape_infor = []
    for contour in cnts:
        retval, target_img_pose, shape_infor = get_rectangle_infor(contour,target_size)
        if retval == 1:
            detect_flag = 1
            target_world_pose=get_target_world_pose(target_img_pose,target_size)
            break
    if not detect_flag:
        return False,None,None

    # Draw the detection overlay: corner dots, axes arrows and slot labels.
    box = cv2.boxPoints(shape_infor)
    # BUGFIX: np.int0 was removed in NumPy 2.0; np.intp is the alias it
    # pointed to, so this is behavior-identical on older NumPy as well.
    box = np.intp(box)
    for point in box:
        cv2.circle(temp,point,3,(255,0,0),-1)
    cv2.arrowedLine(temp,(int(target_img_pose[2][0]), int(target_img_pose[2][1])),(int((target_img_pose[1][0]+target_img_pose[4][0])/2), int((target_img_pose[1][1]+target_img_pose[4][1])/2)),color=(0,255,0))
    cv2.arrowedLine(temp,(int(target_img_pose[2][0]), int(target_img_pose[2][1])),(int((target_img_pose[3][0]+target_img_pose[4][0])/2), int((target_img_pose[3][1]+target_img_pose[4][1])/2)),color=(255,0,0))
    cv2.circle(temp,(int(target_img_pose[2][0]), int(target_img_pose[2][1])), 3, (0, 0, 255),-1)
    cv2.putText(temp, "1", (int(target_img_pose[0][0]), int(target_img_pose[0][1])), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
    cv2.putText(temp, "2", (int(target_img_pose[1][0]), int(target_img_pose[1][1])), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
    cv2.putText(temp, "3", (int(target_img_pose[3][0]), int(target_img_pose[3][1])), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
    cv2.putText(temp, "4", (int(target_img_pose[4][0]), int(target_img_pose[4][1])), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)

    # Forward kinematics at a hard-coded joint configuration — this matches
    # wait_joint_angles, i.e. the arm is assumed to be parked at the standby
    # pose while detecting (TODO confirm it always is).
    RT_end_to_base=forward_kinematic(np.deg2rad([0,102,-48,-108,0]))
    retval,RT_target_to_base=get_target_base_pose(target_img_pose,target_world_pose,target_size=target_size,RT_end_to_base=RT_end_to_base)
    if not retval:
        return False,None,None

    RT_target_to_base=np.array(RT_target_to_base)
    cv2.putText(temp, "targetTobase:"+"{:.2f}".format(RT_target_to_base[0,3])+','+"{:.2f}".format(RT_target_to_base[1,3])+','+"{:.2f}".format(RT_target_to_base[2,3]), (10, 100), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 1)
    # In-plane deflection angle of the target's LONG edge in the base frame.
    if np.linalg.norm(target_img_pose[1] - target_img_pose[0])>np.linalg.norm(target_img_pose[3] - target_img_pose[0]):
        # The world x axis is parallel to the long edge: use column 0 of R.
        angle=math.atan2(RT_target_to_base[1,0],RT_target_to_base[0,0])*180/math.pi
    else:
        # Otherwise the world y axis carries the long edge: use column 1 of R.
        angle=math.atan2(RT_target_to_base[1,1],RT_target_to_base[0,1])*180/math.pi
    cv2.putText(temp, "angle:"+"{:.2f}".format(angle), (10, 150), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 1)

    cv2.imshow('目标检测结果',temp)
    return True,RT_target_to_base,angle

def filter(RTs,angles):
    """Reject outlier poses and average the survivors.

    Args:
        RTs (ndarray): stack of 4x4 pose matrices, shape (N, 4, 4).
        angles (ndarray): the N angles matching RTs.

    Returns:
        (good, RT_mean, angle_mean): number of inlier poses, the element-wise
        mean of the inlier pose matrices, and the mean inlier angle.
    """
    # Translation components of every pose.
    xs = RTs[:, 0, 3]
    ys = RTs[:, 1, 3]

    # A pose is an inlier when both its x and y translation lie strictly
    # within 2 standard deviations of the respective mean.
    inlier = (np.abs(xs - np.mean(xs)) < 2 * np.std(xs)) & \
             (np.abs(ys - np.mean(ys)) < 2 * np.std(ys))

    filtered_RTs = RTs[inlier]
    filtered_angle = angles[inlier]

    print("原始位姿数量:", len(RTs))
    print("有效位姿数量:", len(filtered_RTs))
    good = len(filtered_RTs)

    # Average the inliers element-wise.
    RT_mean = np.mean(filtered_RTs, axis=0)
    angle_mean = np.mean(filtered_angle, axis=0)
    return good, RT_mean, angle_mean
    


# Create the display windows ("Extracted ROI" also hosts the trackbars).
cv2.namedWindow("Original Image")
cv2.namedWindow("Extracted ROI")

# Trackbars for interactively tuning the HSV threshold range; each callback
# writes one channel bound of hsv_lower / hsv_upper.
cv2.createTrackbar("Hue Min", "Extracted ROI", hsv_lower[0], 179, on_trackbar_hue_min)
cv2.createTrackbar("Hue Max", "Extracted ROI", hsv_upper[0], 179, on_trackbar_hue_max)
cv2.createTrackbar("Saturation Min", "Extracted ROI", hsv_lower[1], 255, on_trackbar_saturation_min)
cv2.createTrackbar("Saturation Max", "Extracted ROI", hsv_upper[1], 255, on_trackbar_saturation_max)
cv2.createTrackbar("Value Min", "Extracted ROI", hsv_lower[2], 255, on_trackbar_value_min)
cv2.createTrackbar("Value Max", "Extracted ROI", hsv_upper[2], 255, on_trackbar_value_max)

# Initialize the display.
# extract_roi()
# Hardware setup: camera, serial link to the arm, and teach-mode helper.
cap = cv2.VideoCapture(0)
uart=UART()
teach=Teach()
wait_joint_angles=[0,102,-48,-108,0]# standby joint configuration (degrees)
place_pose=[200,200,50,0,-90]# place pose; components match inverse_kinematic's
                             # (x, y, z, ...) arguments — TODO confirm last two
cur_joint_angles=[]
target_size=(32,12)# target length and width (units match place_pose — presumably mm; confirm)
uart.port='COM3'
uart.baudrate=115200

if not uart.open_port():
    print('打开串口失败！')
    exit()
else:
    # Move to the standby pose and open the gripper.
    print('串口打开成功')
    time.sleep(1)
    uart.send_command(wait_joint_angles,'1000')
    time.sleep(1)
    # uart.send_command(wait_joint_angles,'0500')
    # time.sleep(1)
    uart.set_grapper_pwm(1100)
    time.sleep(1)
    # Sent twice — presumably to make sure the command lands; confirm.
    uart.set_grapper_pwm(1100)

    
# Accumulators for per-frame detections between filtering rounds.
RTs=[]
angles=[]
# Main loop: iterate over camera frames.
while cap.isOpened():
    # Read one frame from the camera.
    success, frame = cap.read()

    if success:
        ret,RT_target_to_base,angle=extract_roi(frame,target_size)
        if ret:
            RTs.append(RT_target_to_base)
            angles.append(angle)
        # Once more than 60 poses are collected, filter outliers and act.
        if len(RTs)>60:
            print('')
            print('60组位姿集齐完毕，开始滤波...')
            good,RT_avg,angle_avg=filter(np.array(RTs),np.array(angles))
            # Push the grasp point 10 units outward along the base->target
            # direction (presumably compensates gripper geometry — confirm).
            offset_dis=10
            offset_angle=math.atan2(RT_avg[1,3],RT_avg[0,3])
            RT_avg[0,3]=RT_avg[0,3]+offset_dis*math.cos(offset_angle)
            RT_avg[1,3]=RT_avg[1,3]+offset_dis*math.sin(offset_angle)
            print('滤波完成，RT_avg:',RT_avg,'\nangle_avg:',angle_avg)
            RTs.clear()
            angles.clear()
            if good>30:
                print('好位姿大于30组，开始逆解...')
                ret,joint_angles,best_alpha=inverse_kinematic(RT_avg[0,3],RT_avg[1,3],10,0,-90)
                if ret:
                    
                    # Two candidate wrist roll angles (target long edge can be
                    # grasped from either of two orientations 180 deg apart).
                    roll1=-(angle_avg-joint_angles[0][0])
                    roll2=-(angle_avg+180-joint_angles[0][0])
                    # print('roll1:',roll1)
                    # Normalize both candidates into [-90, 90].
                    if roll1>90:
                        roll1=roll1-180
                    if roll1<-90:
                        roll1=180+roll1
                        
                    if roll2>90:
                        roll2=roll2-180
                    if roll2<-90:
                        roll2=180+roll2
                        
                    # Prefer roll1 when it is inside the reachable range.
                    if roll1>=-135 and roll1<=135:
                        joint_angles[0][4]=roll1
                    else:
                        joint_angles[0][4]=roll2
                    print('逆解成功，调整后的角度：',joint_angles[0])
                    # Waypoints are [5 joint angles..., gripper PWM];
                    # 1100/1800 presumably mean open/closed — confirm.
                    points=[wait_joint_angles+[1100],joint_angles[0]+[1800]]
                    print('开始夹取...')
                    ret=teach.joint_point_teach(uart,points)
                    if ret==1:
                        print('夹取成功！开始放置...')
                        cur_joint_angles=joint_angles[0]# remember the grasp configuration
                        ret,joint_angles,best_alpha=inverse_kinematic(place_pose[0],place_pose[1],place_pose[2],place_pose[3],place_pose[4])
                        points=[cur_joint_angles+[1800],[0,102,-48,-108,0,1800],joint_angles[0]+[1100]]
                        ret=teach.joint_point_teach(uart,points)
                        if ret==1:
                            print('放置成功！开始回待命位置...')
                            cur_joint_angles=joint_angles[0]
                            points=[cur_joint_angles+[1100],wait_joint_angles+[1100]]
                            ret=teach.joint_point_teach(uart,points)
                            if ret==1:
                                print('回待命位置成功！开始下一次检测...')
                                time.sleep(0.5)# let the arm settle before the next detection
                    else:
                        print('轨迹规划失败，夹取失败！')
                    # No matter what happened, return to the standby pose
                    # before starting on the next frame.
                    uart.send_command(wait_joint_angles,'1000')
                    time.sleep(1)
                    uart.send_command(wait_joint_angles,'1000')
                    time.sleep(0.01)
                    uart.set_grapper_pwm(1100)
                    time.sleep(0.5)
                    uart.set_grapper_pwm(1100)
                else:
                    print('逆解失败!')
            else:
                print('好位姿少于30组，继续检测...')
                
    # ESC quits the loop.
    key = cv2.waitKey(1)
    if key == 27:
        break



# Close all windows.
cv2.destroyAllWindows()
