import math
import os
import sys
import time
from collections import deque
from typing import List, Optional, Tuple

import numpy as np
from ultralytics import YOLO

# To allow importing the D435 camera interface when this script is run
# directly, dynamically add the rs2 directory to sys.path.
_CURRENT_DIR = os.path.dirname(__file__)
_RS2_DIR = os.path.abspath(os.path.join(_CURRENT_DIR, '..', 'rs2'))
if _RS2_DIR not in sys.path:
    sys.path.append(_RS2_DIR)
try:
    from d435 import D435Camera  # noqa: E402
except Exception:
    D435Camera = None  # on import failure, a clear error is raised where it is used


class DetectModel:
    """YOLO-based object detection, depth ranging and PID approach control.

    The class is hardware-agnostic: subclasses must implement the adapter
    methods (get_camera_frame, get_align_depth, send_velocity_command, turn).
    GlobalManager() ties everything into a locate-and-approach loop that
    stops once the target is within 0.1 m.
    """

    def __init__(self, yolo_path: str, prob_threshold: float = 0.8, cache_size: int = 10, timeout: float = 2.0):
        """
        Initialize the detector.

        :param yolo_path: path to the YOLO model weights
        :param prob_threshold: confidence threshold for caching detections
        :param cache_size: maximum length of the detection cache
        :param timeout: cache timeout in seconds
        """
        self.model = YOLO(yolo_path)
        self.prob_threshold = prob_threshold
        self.cache_size = cache_size
        self.timeout = timeout

        # Desired stopping distance (m) plus PID gains and velocity limits.
        self.desired_distance = 0.1
        self._pid_params = {
            'x': {'kp': 0.8, 'ki': 0.0, 'kd': 0.1},      # forward (distance)
            'y': {'kp': 0.6, 'ki': 0.0, 'kd': 0.05},     # lateral (horizontal offset)
            'yaw': {'kp': 1.2, 'ki': 0.0, 'kd': 0.1},    # heading (horizontal angle)
        }
        self._pid_state = {
            'x': {'int': 0.0, 'prev_err': 0.0},
            'y': {'int': 0.0, 'prev_err': 0.0},
            'yaw': {'int': 0.0, 'prev_err': 0.0},
            't_prev': time.time(),
        }
        self._limits = {
            'vx': 0.6,    # m/s
            'vy': 0.6,    # m/s
            'yaw': 1.2,   # rad/s
            'int': 1.0,   # integral anti-windup clamp
        }

        # Detection cache (only results above the confidence threshold are kept).
        self.result_cache = deque(maxlen=cache_size)
        self.last_update_time = time.time()

    # ========= Abstract interface (adapt to your hardware) =========
    def get_camera_frame(self):
        """Return the current RGB camera frame (must be implemented by a subclass)."""
        raise NotImplementedError

    def get_align_depth(self):
        """Return the depth map aligned to the RGB frame (must be implemented by a subclass)."""
        raise NotImplementedError

    def send_velocity_command(self, vel_x: float, vel_y: float, yaw_z: float):
        """Send a velocity command to the robot dog (must be implemented by a subclass)."""
        raise NotImplementedError

    def turn(self, angle: float):
        """Turn the robot dog in place (must be implemented by a subclass)."""
        raise NotImplementedError

    # ========= YOLO detection =========
    def detect(self, frame) -> List[Tuple[str, float]]:
        """
        Run YOLO on a single frame.

        :param frame: image frame
        :return: [(class_name, prob), ...] for detections above prob_threshold
        """
        results = self.model.predict(source=frame, verbose=False)
        detected: List[Tuple[str, float]] = []

        if len(results) == 0:
            return detected

        result = results[0]
        for box in result.boxes:
            cls_id = int(box.cls.item())
            cls_name = self.model.names[cls_id]
            prob = float(box.conf.item())

            if prob >= self.prob_threshold:
                detected.append((cls_name, prob))

        return detected

    # ========= Continuous detection and target confirmation =========
    def identifyObject(self, target_object: str) -> Tuple[bool, str]:
        """
        Repeatedly call detect() to confirm the target is in view.

        :param target_object: target class name
        :return: (found, target_object)
        """
        while True:
            frame = self.get_camera_frame()
            detected_objs = self.detect(frame)

            for name, prob in detected_objs:
                if name == target_object:
                    self.result_cache.append((name, prob))
                    self.last_update_time = time.time()

            # Evaluate the cache once it is full.
            if len(self.result_cache) >= self.cache_size:
                # Count occurrences per class name.
                counts = {}
                for obj, _ in self.result_cache:
                    counts[obj] = counts.get(obj, 0) + 1
                # Take the two most frequent classes.
                top_objs = sorted(counts.items(), key=lambda x: x[1], reverse=True)[:2]
                if any(obj == target_object for obj, _ in top_objs):
                    self.result_cache.clear()
                    return True, target_object

            # Clear the cache and give up after a timeout without new hits.
            if time.time() - self.last_update_time > self.timeout:
                self.result_cache.clear()
                return False, target_object

    # ========= Ranging =========
    def measureDistance(self, target_object: str) -> Optional[Tuple[str, float, float, Tuple[int, int, int, int]]]:
        """
        Measure the distance to the target and return its bounding box.

        :param target_object: target class name
        :return: (object, prob, distance_in_m, bbox), or None when the target
                 is lost or no valid depth is available
        """
        frame = self.get_camera_frame()
        results = self.model.predict(source=frame, verbose=False)[0]

        for box in results.boxes:
            cls_id = int(box.cls.item())
            cls_name = self.model.names[cls_id]
            prob = float(box.conf.item())

            if cls_name == target_object:
                if prob < 0.5:
                    return None  # confidence too low: consider the target lost
                # Fetch the aligned depth map.
                depth_map = self.get_align_depth()
                if depth_map is None:
                    return None
                # Safely convert tensor coordinates to integer pixels.
                bbox_xyxy = box.xyxy[0].tolist() if hasattr(box.xyxy[0], 'tolist') else box.xyxy[0]
                x1, y1, x2, y2 = map(int, bbox_xyxy)
                roi_depth = depth_map[y1:y2, x1:x2]
                if roi_depth.size == 0:
                    return None
                # Filter out invalid depth values and average the rest
                # (assumed to be in meters — verify against the camera driver).
                valid_mask = roi_depth > 0
                if not np.any(valid_mask):
                    return None
                avg_distance = float(np.mean(roi_depth[valid_mask]))
                return target_object, prob, avg_distance, (x1, y1, x2, y2)
        return None

    # ========= Relative position =========
    def computeRelativePosition(self, bbox, avg_distance, camera_intrinsics):
        """
        Compute the target's relative position from its bbox center and depth
        using the pinhole camera model.

        :param bbox: (x1, y1, x2, y2)
        :param avg_distance: average depth value (m)
        :param camera_intrinsics: camera intrinsics (fx, fy, cx, cy)
        :return: (X, Y, Z) in the camera frame; (0.0, 0.0, inf) on invalid input
        """
        x1, y1, x2, y2 = bbox
        u = float((x1 + x2) / 2)
        v = float((y1 + y2) / 2)
        fx, fy, cx, cy = map(float, camera_intrinsics)

        # Basic defense: avoid division by zero and invalid distances.
        if fx == 0.0 or fy == 0.0 or avg_distance is None or not np.isfinite(avg_distance):
            return 0.0, 0.0, float('inf')

        Z = max(0.0, float(avg_distance))
        X = (u - cx) * Z / fx
        Y = (v - cy) * Z / fy
        return X, Y, Z

    # ========= Velocity computation (PID) =========
    def computeVelocity(self, rel_pos, imu_data):
        """
        PID-based velocity computation.

        Assumes the camera frame is aligned with the robot frame:
        - X: horizontal right (m), used for lateral correction
        - Y: vertical down (m), not used for planar control here
        - Z: forward (m), used for forward/backward control

        :param rel_pos: (X, Y, Z)
        :param imu_data: IMU data (optional, currently unused)
        :return: (vel_x, vel_y, yaw_z) — forward speed (m/s), lateral speed
                 (m/s) and angular rate (rad/s)
        """

        # Fallback initialization in case a subclass (e.g. a FakeDetectModel)
        # did not call the parent __init__.
        if not hasattr(self, '_pid_params'):
            self.desired_distance = 0.1
            self._pid_params = {
                'x': {'kp': 0.8, 'ki': 0.0, 'kd': 0.1},
                'y': {'kp': 0.6, 'ki': 0.0, 'kd': 0.05},
                'yaw': {'kp': 1.2, 'ki': 0.0, 'kd': 0.1},
            }
        if not hasattr(self, '_pid_state'):
            self._pid_state = {
                'x': {'int': 0.0, 'prev_err': 0.0},
                'y': {'int': 0.0, 'prev_err': 0.0},
                'yaw': {'int': 0.0, 'prev_err': 0.0},
                't_prev': time.time(),
            }
        if not hasattr(self, '_limits'):
            self._limits = {'vx': 0.6, 'vy': 0.6, 'yaw': 1.2, 'int': 1.0}

        X, _, Z = rel_pos
        # Distance error: drive Z towards desired_distance.
        err_x = float(Z - self.desired_distance)
        # Lateral offset error: drive X towards 0.
        err_y = float(X)
        # Heading error: approximated by the horizontal angle atan2(X, Z).
        yaw_err = float(math.atan2(X, max(1e-6, Z)))

        # Time step (lower-bounded to avoid division blow-ups).
        now = time.time()
        dt = max(1e-3, now - self._pid_state['t_prev'])
        self._pid_state['t_prev'] = now

        def pid_axis(axis: str, error: float) -> float:
            p = self._pid_params[axis]
            s = self._pid_state[axis]
            # Integral term with anti-windup clamp.
            s['int'] += error * dt
            s['int'] = max(-self._limits['int'], min(self._limits['int'], s['int']))
            # Derivative term.
            d_err = (error - s['prev_err']) / dt
            s['prev_err'] = error
            # PID output.
            return p['kp'] * error + p['ki'] * s['int'] + p['kd'] * d_err

        vx = pid_axis('x', err_x)    # forward
        vy = pid_axis('y', err_y)    # lateral
        yaw = pid_axis('yaw', yaw_err)

        # Clamp to velocity limits.
        vx = max(-self._limits['vx'], min(self._limits['vx'], vx))
        vy = max(-self._limits['vy'], min(self._limits['vy'], vy))
        yaw = max(-self._limits['yaw'], min(self._limits['yaw'], yaw))

        return vx, vy, yaw

    # ========= Manager logic =========
    def ManagerIdentify(self, target_object: str):
        """Locate the target once; on success measure it, otherwise turn around."""
        found, obj = self.identifyObject(target_object)
        if found:
            return self.measureDistance(obj)
        else:
            self.DogController('turn')
            return None

    def DogController(self, cmd: str):
        """Dispatch a simple named command to the robot."""
        if cmd == 'turn':
            self.turn(180)

    def GlobalManager(self, target_object: str):
        """
        Improved global scheduling:
        - Locate the target first; once found, enter a continuous tracking loop
          that keeps ranging, computing position/velocity and sending commands.
        - If the target is lost during ranging, return to the locating stage.
        - Stop once the distance is at most 0.1 m.
        """

        tracked_label = target_object
        intrinsics_default = (600.0, 600.0, 320.0, 240.0)

        while True:
            # 1) Locating stage (blocks until the target is found or times out).
            found, obj = self.identifyObject(tracked_label)
            if not found:
                # Not found: turn around and keep trying.
                self.DogController('turn')
                continue

            tracked_label = obj  # remember the locked target class name

            # 2) Tracking stage (range/compute/command) until lost or arrived.
            while True:
                res = self.measureDistance(tracked_label)
                if res is None:
                    # Target lost: fall back to the locating stage.
                    self.result_cache.clear()
                    break

                obj_name, prob, dist, bbox = res
                if dist <= 0.1:
                    print(f"{obj_name} 已到达 0.1m 范围内，停止")
                    return

                # Camera intrinsics (prefer the live camera's values).
                intrinsics = intrinsics_default
                try:
                    if hasattr(self, 'camera') and self.camera is not None:
                        cam_intr = self.camera.get_color_intrinsics()
                        if cam_intr is not None:
                            intrinsics = cam_intr
                except Exception:
                    pass

                # Position and velocity computation, then command dispatch.
                rel_pos = self.computeRelativePosition(bbox, dist, intrinsics)
                imu_data = None  # placeholder: hook up a real IMU here
                vel_x, vel_y, yaw_z = self.computeVelocity(rel_pos, imu_data)
                self.send_velocity_command(vel_x, vel_y, yaw_z)

                # Small throttle to avoid busy-waiting the CPU.
                time.sleep(0.01)

class D435DetectModel(DetectModel):
    """
    DetectModel implementation using a RealSense D435 camera.

    Implements the unified interface: get_camera_frame, get_align_depth,
    turn and send_velocity_command.
    """

    def __init__(
        self,
        yolo_path: str,
        prob_threshold: float = 0.8,
        cache_size: int = 10,
        timeout: float = 2.0,
        width: int = 640,
        height: int = 480,
        fps: int = 30,
    ):
        if D435Camera is None:
            raise ImportError(
                '无法导入 D435Camera。请确认文件 yolo-jetson/rs2/d435.py 存在且可被导入。'
            )
        super().__init__(yolo_path, prob_threshold, cache_size, timeout)
        self.camera = D435Camera(width=width, height=height, fps=fps, autostart=True)

    def _fetch_with_retry(self, getter, error_message: str):
        """Fetch a frame via *getter*, retrying once after a short wait.

        :param getter: zero-argument callable returning a frame or None
        :param error_message: message for the RuntimeError raised on failure
        :raises RuntimeError: if both attempts return None
        """
        data = getter()
        if data is None:
            # Simple tolerance: wait briefly for the next frame.
            time.sleep(0.01)
            data = getter()
        if data is None:
            raise RuntimeError(error_message)
        return data

    # Unified interface implementations
    def get_camera_frame(self):
        # RGB frame aligned with the depth stream.
        return self._fetch_with_retry(self.camera.get_align_rgb, '无法从 D435 获取 RGB 帧')

    def get_align_depth(self):
        # Depth map aligned with the RGB stream.
        return self._fetch_with_retry(self.camera.get_align_depth, '无法从 D435 获取对齐深度图')

    # Placeholder command implementations so direct debugging runs
    # without raising NotImplementedError.
    def send_velocity_command(self, vel_x: float, vel_y: float, yaw_z: float):
        print(f'[D435DetectModel] send_velocity_command: vx={vel_x:.3f}, vy={vel_y:.3f}, yaw_z={yaw_z:.3f}')

    def turn(self, angle: float):
        print(f'[D435DetectModel] turn: angle={angle}')


if __name__ == '__main__':
    # Simple connectivity test: load the model, grab one frame, run detection once.
    # Note: a usable ultralytics YOLO weight file must exist locally.
    model_path = 'yolov8n.pt'  # replace with your own model path
    try:
        detector = D435DetectModel(model_path)
        sample_frame = detector.get_camera_frame()
        detections = detector.detect(sample_frame)
        print('检测结果:', detections)
    except Exception as exc:
        print('运行出错:', exc)
