from ultralytics import YOLO
import cv2
import numpy as np
from pathlib import Path
import torch
import os
import matplotlib.pyplot as plt
from PIL import Image
from calibration import Calibration # 读深度
from processPoint import PointProcessor # 根据pose 获取3D点
from processLine import LineProcessor # 根据3D点进行拟合，获得中心点和两个方向向量
from processVector import VectorPocessor # 根据向量组和当前向量组，计算最终结果（偏移和旋转）
from visualize import Visualize # 可视化

class YoloSegPose:
    """Estimate a part's 3D position/orientation from a color+depth image pair.

    Pipeline: a YOLO segmentation model gives the part outline and a YOLO pose
    model gives the visible corner keypoints; the outline is simplified to a
    polygon, edge pixels are sampled, lifted to 3D using the depth image,
    fitted to 3D lines, and the resulting direction vectors are compared
    against a previously stored "standard" pose to obtain offset and rotation.
    """

    def __init__(self, seg_model_path=None, pose_model_path=None):
        # Only load the YOLO models when both paths are supplied, so the class
        # can still be instantiated for the pure post-processing helpers.
        if seg_model_path is not None and pose_model_path is not None:
            self.seg_model = YOLO(seg_model_path, task="segment")
            self.pose_model = YOLO(pose_model_path, task="pose")
        # Expected polygon vertex count for a given number of exposed corners:
        # 1 corner -> triangle; 2 -> quadrilateral; 3 -> pentagon; 4 -> quadrilateral.
        self.target_vertices = {1: 3, 2: 4, 3: 5, 4: 4}
        self.standard_edges = None
        self.standard_vector = None
        self.calibration_tool = Calibration()  # depth image access / alignment
        self.processPoint = PointProcessor(calibration=self.calibration_tool)  # pose -> 3D points
        self.processLine = LineProcessor()  # fit 3D points -> center + two direction vectors
        self.processVector = VectorPocessor()  # standard vs. current vectors -> offset + rotation
        self.visualize_tool = Visualize()

    def set_standard_position(self, image_path):
        """Compute the pose vectors of *image_path* and store them as the
        reference ("standard") pose for later comparisons."""
        pose, vector = self.get_edges_3d_points(image_path)
        print("standard-vector", vector)
        if vector is not None:
            self.processVector.set_standard(vector)
        else:
            print("ERROR:", "设置标准位置失败")

    def get_polygon(self, polygon, keypoints_num=None):
        """Approximate *polygon* (mask contour, (N, 2)-like) with a polygon
        whose vertex count matches the number of exposed corner keypoints.

        Args:
            polygon: raw mask contour points.
            keypoints_num: number of confidently visible corners (0-4) or None.

        Returns:
            The ``cv2.approxPolyDP`` result ((K, 1, 2) int32), or None if no
            epsilon in [0.01, 0.1] of the arc length yields a target count.
        """
        # Fall back to quadrilateral/pentagon when the corner count is unknown
        # (None) or zero. The previous `is not None` check raised KeyError for
        # keypoints_num == 0, which get_polygon_pose can legitimately produce;
        # truthiness matches sibling get_polygon2.
        if keypoints_num:
            target = [self.target_vertices[keypoints_num]]
        else:
            target = [4, 5]
        pts = np.round(polygon).astype(np.int32).reshape(-1, 1, 2)
        arc = cv2.arcLength(pts, True)  # loop-invariant perimeter, hoisted
        for epsilon in np.linspace(0.01, 0.1, 10):
            approx = cv2.approxPolyDP(pts, epsilon * arc, True)
            if len(approx) in target:
                return approx
        return None

    def get_polygon2(self, polygon, keypoints_num=None):
        """Convex-hull-based variant of :meth:`get_polygon`.

        Takes the convex hull of the mask contour, lightly smooths the vertex
        coordinates, then fits a polygon with the expected vertex count.

        Returns:
            The ``cv2.approxPolyDP`` result ((K, 1, 2)), or None on failure.
        """
        pts = polygon.reshape(-1, 2)  # [N,2]
        # Step 1: convex hull removes concavities from the raw mask contour.
        hull = cv2.convexHull(pts.astype(np.float32))
        hull = hull.reshape(-1, 2)  # [M,2]
        if len(hull) < 3:
            # Degenerate mask: fewer than 3 hull points cannot form a polygon
            # (also avoids division by zero in the resampling loop below).
            return None

        # Step 2: "uniform resampling".
        # NOTE(review): target_idx grows at least as fast as
        # len(sampled_points), so this loop effectively copies the hull points
        # unchanged; confirm whether real arc-length resampling was intended.
        perimeter = cv2.arcLength(hull.reshape(-1, 1, 2), True)
        num_points = max(10, int(perimeter / 2))  # one point every ~2 pixels
        sampled_points = []
        for i in range(len(hull)):
            ratio = i / (len(hull) - 1)
            target_idx = min(int(ratio * num_points), num_points - 1)
            if len(sampled_points) <= target_idx:
                sampled_points.append(hull[i])

        # Step 3: Gaussian smoothing of the x and y coordinates separately.
        xs = [p[0] for p in sampled_points]
        ys = [p[1] for p in sampled_points]
        xs_smooth = cv2.GaussianBlur(np.array(xs), (5, 1), 0).flatten()
        ys_smooth = cv2.GaussianBlur(np.array(ys), (5, 1), 0).flatten()
        smoothed_pts = np.stack([xs_smooth, ys_smooth], axis=1).astype(np.float32)
        smoothed_pts = smoothed_pts.reshape(-1, 1, 2)  # [K,1,2]

        # Step 4: polygon fitting with increasing tolerance.
        target = [self.target_vertices[keypoints_num]] if keypoints_num else [4, 5]
        for epsilon in np.linspace(0.01, 0.1, 10):
            approx = cv2.approxPolyDP(
                smoothed_pts,
                epsilon * cv2.arcLength(smoothed_pts, True),
                True
            )
            if len(approx) in target:
                return approx
        return None

    def seg_detect(self, image_path):
        """Run the segmentation model on *image_path*; returns YOLO results."""
        results = self.seg_model.predict(image_path)
        return results

    def pose_detect(self, image_path):
        """Run the pose model on *image_path*; returns YOLO results."""
        results = self.pose_model.predict(image_path)
        return results

    def get_pose_dict(self, result, poly, threshold=0.5):
        """Combine detected corner keypoints with polygon vertices.

        Detected corners are snapped to their nearest polygon vertex; the
        leftover vertices are then labelled as inferred edge points depending
        on which corners are visible.

        return {
            'left_top': {'coord': (x, y)},
            'left_bottom': {'coord': (x, y)},
            'right_top': {'coord': (x, y)},
            'left_0': {'coord': (x, y)},
            'top_0': {'coord': (x, y)},
            ... # other points
        }
        poly: polygon contour points, array of shape (N, 2)
        """
        # Ensure the polygon is a 2-D (n, 2) array.
        if poly.ndim == 3:
            poly = poly.reshape(-1, 2)

        # 1. Collect detected corner keypoints above the confidence threshold.
        corner_names = ["left_top", "left_bottom", "right_top", "right_bottom"]
        corner_indices = [0, 1, 2, 3]

        xy = result.keypoints.xy
        conf = result.keypoints.conf
        if conf is None:
            print(f"未检测到关键点")
            return None

        if isinstance(xy, torch.Tensor):
            xy = xy.cpu().numpy()
        if isinstance(conf, torch.Tensor):
            conf = conf.cpu().numpy()

        pose_dict = {}
        for name, idx in zip(corner_names, corner_indices):
            x, y = xy[0][idx]
            c = conf[0][idx]
            if c >= threshold:
                pose_dict[name] = {"coord": (float(x), float(y))}

        # 2. Snap each detected corner to the nearest polygon vertex and
        #    remove that vertex from the remaining pool.
        remaining_poly = poly.copy()  # avoid mutating the caller's data

        if remaining_poly.ndim == 3:
            remaining_poly = remaining_poly.reshape(-1, 2)

        for name in list(pose_dict.keys()):
            if name not in corner_names:
                continue
            cx, cy = pose_dict[name]["coord"]
            # Nearest polygon vertex by squared Euclidean distance.
            min_dist = float('inf')
            min_idx = -1
            for i, pt in enumerate(remaining_poly):
                px, py = pt
                dist = (px - cx) ** 2 + (py - cy) ** 2
                if dist < min_dist:
                    min_dist = dist
                    min_idx = i
            if min_idx >= 0:
                pose_dict[name] = {"coord": tuple(remaining_poly[min_idx])}
                remaining_poly = np.delete(remaining_poly, min_idx, axis=0)

        # 3. Label the leftover vertices as edge points based on which corners
        #    are visible.
        detected_corners = set(pose_dict.keys())

        # Case 1: only left_top exposed -> triangle.
        if detected_corners == {"left_top"} and len(remaining_poly) == 2:
            # Sort by x: left point (left_0), right point (top_0).
            remaining_poly = sorted(remaining_poly, key=lambda p: p[0])
            pose_dict["left_0"] = {"coord": tuple(remaining_poly[0])}
            pose_dict["top_0"] = {"coord": tuple(remaining_poly[1])}

        # Case 2: left_top + left_bottom exposed -> quadrilateral.
        elif detected_corners == {"left_top", "left_bottom"} and len(remaining_poly) == 2:
            # Sort by y: upper point (top_0), lower point (bottom_0).
            remaining_poly = sorted(remaining_poly, key=lambda p: p[1])
            pose_dict["top_0"] = {"coord": tuple(remaining_poly[0])}
            pose_dict["bottom_0"] = {"coord": tuple(remaining_poly[1])}

        # Case 3: left_top + right_top exposed -> quadrilateral.
        elif detected_corners == {"left_top", "right_top"} and len(remaining_poly) == 2:
            # Sort by x: left point (left_0), right point (right_0).
            remaining_poly = sorted(remaining_poly, key=lambda p: p[0])
            pose_dict["left_0"] = {"coord": tuple(remaining_poly[0])}
            pose_dict["right_0"] = {"coord": tuple(remaining_poly[1])}

        # Case 4: left_top + left_bottom + right_top exposed -> pentagon.
        elif detected_corners == {"left_top", "left_bottom", "right_top"} and len(remaining_poly) == 2:
            # Sort by y: upper point (right_0), lower point (bottom_0).
            remaining_poly = sorted(remaining_poly, key=lambda p: p[1])
            pose_dict["right_0"] = {"coord": tuple(remaining_poly[0])}
            pose_dict["bottom_0"] = {"coord": tuple(remaining_poly[1])}
        # Case 5: the two bottom corners exposed -> quadrilateral.
        elif detected_corners == {"left_bottom", "right_bottom"} and len(remaining_poly) == 2:
            remaining_poly = sorted(remaining_poly, key=lambda p: p[0])
            pose_dict["left_0"] = {"coord": tuple(remaining_poly[0])}
            pose_dict["right_0"] = {"coord": tuple(remaining_poly[1])}
        return pose_dict

    def get_box_dict(self, result):
        """Return all detected boxes as {class_name: xyxy_array}.

        Returns None when the result contains no boxes. If two detections
        share a class, the later one overwrites the earlier (dict key).
        """
        if result.boxes is None or len(result.boxes) == 0:
            return None
        boxes = result.boxes.xyxy.cpu().numpy()
        class_ids = result.boxes.cls.cpu().numpy().astype(int)
        # Class-id -> name mapping, when the result exposes one.
        class_names = getattr(result, "names", None)
        box_dict = {}
        for box, cls_id in zip(boxes, class_ids):
            name = class_names[cls_id] if class_names is not None else str(cls_id)
            box_dict[name] = box
        return box_dict

    def get_polygon_pose(self, image):
        """Detect corner keypoints and the mask polygon for one image.

        Returns:
            (num_pose_points, fitted_pose): the number of confidently visible
            corner keypoints and a {name: (x, y)} dict of corner/edge points;
            (0, {}) on any detection failure.
        """
        fitted_pose = {}
        num_pose_points = 0
        if image is None:
            return num_pose_points, fitted_pose

        # Keypoint detection.
        pose_results = self.pose_detect(image)
        if not pose_results or len(pose_results[0].keypoints.xy) == 0:
            print("未检测到关键点")
            return num_pose_points, fitted_pose
        # Segmentation.
        seg_result = self.seg_detect(image)[0]
        if seg_result.masks is None or len(seg_result.masks.xy) == 0:
            print("未检测到分割结果")
            return num_pose_points, fitted_pose

        # Raw mask polygon of the first instance.
        polygon = seg_result.masks.xy[0]

        # Count exposed corners (keypoints with confidence >= 0.5).
        xy = pose_results[0].keypoints.xy
        conf = pose_results[0].keypoints.conf
        print("xy:", xy)
        print("conf:", conf)
        if conf is None:
            num_pose_points = 0
        else:
            if isinstance(xy, torch.Tensor):
                xy = xy.cpu().numpy()
            if isinstance(conf, torch.Tensor):
                conf = conf.cpu().numpy()

            # Only the first 4 keypoints are corner points.
            corner_confs = conf[0][:4]
            # Cast to a plain int so callers don't receive np.int64.
            num_pose_points = int(np.sum(corner_confs >= 0.5))

        # Simplify the polygon to the expected vertex count.
        fitted_polygon = self.get_polygon2(polygon, keypoints_num=num_pose_points)
        if fitted_polygon is None:
            print("多边形拟合失败")
            return num_pose_points, fitted_pose

        # Full pose dict, including inferred edge points.
        pose_dict = self.get_pose_dict(pose_results[0], fitted_polygon)

        if pose_dict is None:
            return num_pose_points, fitted_pose

        fitted_pose = {name: info["coord"] for name, info in pose_dict.items()}
        return num_pose_points, fitted_pose

    def get_edges_3d_points(self, image_path, visual_pose=False,
                            visual_edges=False, visual_edges_3d=False,
                            visual_fitted_edges=False, visual_fitted_vector=False):
        """Run the full detection/fitting pipeline on one image.

        The ``visual_*`` flags enable the corresponding debug visualizations;
        visualization failures are reported but never abort the pipeline.

        Returns:
            (pose, fitted_vector): the 2D corner dict and the fitted 3D
            vectors, or (None, None) when detection fails.
        """
        origin_img = cv2.imread(image_path)
        depth_img = self.get_depth_image(image_path)
        # 1. Register the color/depth resolutions with the calibration tool.
        self.calibration_tool.set_resolution(origin_img, depth_img)
        # 2. Pre-set the depth image (all later lookups reuse it); returns the
        #    depth image aligned to the color frame.
        depth_img = self.calibration_tool.set_depth_img(depth_img)
        # 3. Corner points, e.g. {"left_top": (x, y), "left_bottom": (x, y)}.
        pose_num, pose = self.get_polygon_pose(image_path)
        if pose_num == 0:
            return None, None
        print("v3 pose:", pose)
        if visual_pose:
            self.visualize_tool.visualize_pose(pose, origin_img)
        if pose is None or len(pose) == 0 or len(pose) > 5:
            print("Error: Failed to get polygon pose")
            return None, None
        # Sample 60 points along each edge between consecutive corners.
        edges = self.processPoint.get_dict_edges(pose, 60)
        edges_depth = self.processPoint.get_dict_depth(edges)
        if visual_edges:
            try:
                self.visualize_tool.visualize_edges(edges, edges_depth, origin_img, depth_img)
            # Visualization is best-effort: catch Exception (not bare except,
            # which would also swallow KeyboardInterrupt) and keep going.
            except Exception as e:
                print("visual_edges error:", e)
                print("edges:", edges)
                print("edges_depth:", edges_depth)
        # Lift the sampled edge pixels to 3D using the depth values.
        edges_3d = self.processPoint.get_dict_3dpoints(edges, edges_depth)
        if visual_edges_3d:
            try:
                self.visualize_tool.visualize_edges_3d(edges_3d)
            except Exception as e:
                print("visual_edges_3d error:", e)
                print(edges_3d)

        # Fit 3D lines to each edge's point cloud.
        fitted_edges = self.processLine.get_dict_fitted_points(edges_3d)
        if visual_fitted_edges:
            try:
                self.visualize_tool.visualize_fitted_edges(edges_3d, fitted_edges)
            except Exception as e:
                print("visual_fitted_edges error:", e)
                print("edges_3d:", edges_3d)
                print("fitted_edges:", fitted_edges)
        fitted_vector = self.processLine.get_dict_fitted_vector(fitted_edges)

        if visual_fitted_vector:
            try:
                self.visualize_tool.visualize_fitted_vector(self.processVector.standard_vector, fitted_vector)
            except Exception as e:
                print("visual_fitted_vector error:", e)
                print("standard_vector:", self.processVector.standard_vector)
                print("fitted_vector:", fitted_vector)
        return pose, fitted_vector

    def get_depth_image(self, image_path):
        """Load the depth frame matching a color frame.

        The depth image is looked up in a sibling directory:
        ``.../Color/x.jpg`` -> ``.../Depth/x.jpg`` (also ColorN -> DepthN,
        with a ``.png`` fallback for ``.jpg`` color frames).

        Returns:
            A 2-D numpy depth array, or None when the file is missing, the
            directory layout is unexpected, or decoding fails.
        """
        img_name = os.path.basename(image_path)

        # Parent of the Color directory; the Depth directory lives beside it.
        base_dir = os.path.dirname(os.path.dirname(image_path))

        # Map the sub-directory name: Color -> Depth, Color1 -> Depth1, ...
        sub_dir = os.path.basename(os.path.dirname(image_path))
        if "Color" in sub_dir:
            depth_sub_dir = sub_dir.replace("Color", "Depth")
        else:
            print(f"未识别的子目录: {sub_dir}")
            return None

        depth_path = os.path.join(base_dir, depth_sub_dir, img_name)

        if not os.path.exists(depth_path):
            # Depth frames may be stored as .png even when color is .jpg.
            root, ext = os.path.splitext(img_name)
            if ext == '.jpg':
                depth_path = os.path.join(base_dir, depth_sub_dir, root + '.png')
            # Previously a missing non-.jpg file fell through to Image.open
            # and failed there; report the missing path explicitly instead.
            if not os.path.exists(depth_path):
                print(f"深度图路径不存在: {depth_path}")
                return None

        try:
            pil_img = Image.open(depth_path)

            # Handle the depth formats the cameras actually produce.
            if pil_img.mode == 'I;16':  # 16-bit little-endian depth map
                depth_array = np.array(pil_img, dtype=np.uint16)
                # The old byte-order check (`max > 65535`) could never be true
                # for uint16 and called ndarray.newbyteorder(), which was
                # removed in NumPy 2.0 — dropped as dead code.
            elif pil_img.mode == 'I':  # 32-bit integer depth map
                depth_array = np.array(pil_img, dtype=np.uint32)
                depth_array = (depth_array >> 16).astype(np.uint16)  # keep high 16 bits
            else:
                depth_array = np.array(pil_img)
                print(f"警告: 未知模式 {pil_img.mode}")

            # Return a single-channel image.
            if depth_array.ndim == 2:
                return depth_array
            else:
                print(f"警告: 意外通道数 {depth_array.shape}")
                return depth_array[:, :, 0]

        except Exception as e:
            print(f"深度图处理失败: {str(e)}")
            return None

    def test_one_image(self, image_path):
        """Process one image and print the offset/angles relative to the
        stored standard pose.

        Returns:
            (pose, vector): the 2D corner dict and the offset+rotation result.
        """
        pose, fitted_vector = self.get_edges_3d_points(image_path)
        # NOTE(review): fitted_vector may be None when detection fails;
        # assumes get_xyzr_by_vector tolerates None — confirm.
        vector = self.processVector.get_xyzr_by_vector(fitted_vector)
        print("偏差和夹角:", vector)
        return pose, vector


def test_realtime_visualization(yoloSegPose, start_frame=1, end_frame=600):
    """
    Process a sequence of frames and display the estimated center position
    and orientation vectors in an interactive matplotlib window.

    Parameters:
        yoloSegPose: a YoloSegPose instance
        start_frame: first frame number to process
        end_frame: last frame number to process (inclusive)
    """
    # Create the figure
    fig, ax = plt.subplots(figsize=(10, 8))
    ax.set_xlim(-0.5, 5)
    ax.set_ylim(-0.5, 5)
    ax.set_aspect('equal')
    ax.grid(True)
    ax.set_title('Real-time Center Position and Orientation Vectors')
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    
    # Initialize the plot elements (center marker and two direction arrows)
    center_point, = ax.plot([], [], 'ro', markersize=8, label='Center')
    x_vector = ax.quiver([], [], [], [], color='r', scale=10, width=0.005, label='X Vector')
    y_vector = ax.quiver([], [], [], [], color='g', scale=10, width=0.005, label='Y Vector')
    ax.legend()
    
    # Show the initial figure
    plt.ion()  # enable interactive mode
    plt.show()
    
    # Process every frame in the requested range
    for i in range(start_frame, end_frame + 1):
        image_path = f"./data/Color1/{i}.png"
        try:
            result = yoloSegPose.test_one_image(image_path)
            # NOTE(review): test_one_image returns a (pose, vector) tuple, so
            # a non-empty result is always truthy and the dict-style indexing
            # below ('center', 'bottom_vec', ...) raises TypeError, which is
            # swallowed by the except clause — confirm the intended result
            # schema before relying on this visualization.
            if result:  # make sure the result is valid
                cx, cy = result['center'][:2]
                x_vec = result['bottom_vec'][:2]
                y_vec = result['left_vec'][:2]
                
                # Update the center marker
                center_point.set_data([cx], [cy])
                
                # Update the direction arrows
                x_vector.set_offsets([cx, cy])
                x_vector.set_UVC([x_vec[0]], [x_vec[1]])
                
                y_vector.set_offsets([cx, cy])
                y_vector.set_UVC([y_vec[0]], [y_vec[1]])
                
                ax.set_title(f'Real-time Center Position and Orientation Vectors (Frame {i})')
                
                print(f"已处理 {image_path}: center=({cx:.3f}, {cy:.3f}), "
                      f"bottom_angle={result['bottom_angle']:.2f}°, "
                      f"left_angle={result['left_angle']:.2f}°")
                
                # Redraw the figure
                fig.canvas.draw()
                fig.canvas.flush_events()
                
                # Short pause so the update is visible
                plt.pause(0.1)
                
        except Exception as e:
            print(f"处理图像 {image_path} 时出错: {str(e)}")
    
    plt.ioff()  # disable interactive mode

if __name__ == "__main__":
    # Demo entry point: load both RKNN models, register the reference pose,
    # then evaluate a single frame against it.
    detector = YoloSegPose(
        "./merge_yolo11n_seg_rknn_model",
        "./merge_yolo11n_pose_rknn_model",
    )
    detector.set_standard_position("./data/Color/1.png")
    detector.test_one_image("./data/Color/1753323113278.jpg")
    # Uncomment to run the interactive frame-by-frame visualization:
    # test_realtime_visualization(detector, start_frame=1, end_frame=600)

