# -*- coding: utf-8 -*-
import cv2
import numpy as np
import torch
from ultralytics import YOLO
import math
import argparse
from typing import List, Tuple, Optional
from PIL import Image, ImageDraw, ImageFont
import os
import time
import json


class CameraSwitcher:
    """
    Camera switching manager.

    Probes the system for usable cameras and manages opening, switching and
    releasing the active ``cv2.VideoCapture``, with a cooldown between
    switches to avoid thrashing the devices.
    """

    def __init__(self, max_cameras: int = 5, preferred_width: int = 1280, preferred_height: int = 720, backend: str = "default"):
        """
        Initialize the camera switcher.

        Args:
            max_cameras: maximum number of camera indices to probe.
            preferred_width: requested frame width.
            preferred_height: requested frame height.
            backend: capture backend: 'default' | 'dshow' | 'msmf'.
        """
        self.max_cameras = max_cameras
        self.preferred_width = preferred_width
        self.preferred_height = preferred_height
        self.backend = backend
        self.available_cameras = []   # info dicts for cameras that delivered a frame
        self.current_camera_id = 0
        self.current_cap = None       # active capture, opened lazily
        self.last_switch_time = 0
        self.switch_cooldown = 1.0    # minimum seconds between switches

        # Probe for usable cameras up front.
        self._detect_cameras()

    def _open_capture(self, camera_id: int) -> cv2.VideoCapture:
        """Open a VideoCapture for camera_id using the configured backend."""
        backend_flag = {
            "default": None,
            "dshow": cv2.CAP_DSHOW,
            "msmf": cv2.CAP_MSMF
        }.get(self.backend, None)
        if backend_flag is None:
            return cv2.VideoCapture(camera_id)
        return cv2.VideoCapture(camera_id, backend_flag)

    def _detect_cameras(self):
        """Probe indices 0..max_cameras-1 and record cameras that deliver frames."""
        print("正在检测可用摄像头...")
        self.available_cameras = []

        for i in range(self.max_cameras):
            # Bug fix: probe with the configured backend so detection matches
            # the backend later used by switch_to_camera() (previously the
            # default backend was always used for detection).
            cap = self._open_capture(i)
            if cap.isOpened():
                # Read one frame to confirm the camera actually works.
                ret, _ = cap.read()
                if ret:
                    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                    fps = cap.get(cv2.CAP_PROP_FPS)

                    camera_info = {
                        'id': i,
                        'width': width,
                        'height': height,
                        'fps': fps,
                        'name': f"摄像头 {i} ({width}x{height})"
                    }
                    self.available_cameras.append(camera_info)
                    print(f"  发现摄像头 {i}: {width}x{height} @ {fps:.1f}fps")
            # Release unconditionally (the original leaked unopened handles).
            cap.release()

        if not self.available_cameras:
            print("警告: 未检测到可用摄像头")
        else:
            print(f"共检测到 {len(self.available_cameras)} 个可用摄像头")
            # Default to the first detected camera.
            self.current_camera_id = self.available_cameras[0]['id']

    def get_available_cameras(self) -> List[dict]:
        """Return a copy of the detected-camera info list."""
        return self.available_cameras.copy()

    def get_current_camera_info(self) -> Optional[dict]:
        """Return the info dict of the active camera, or None if unknown."""
        for camera in self.available_cameras:
            if camera['id'] == self.current_camera_id:
                return camera
        return None

    def switch_to_camera(self, camera_id: int) -> bool:
        """
        Switch to the given camera.

        Args:
            camera_id: camera index.

        Returns:
            True when the switch succeeded (or the camera was already active).
        """
        # Enforce the switch cooldown.
        current_time = time.time()
        if current_time - self.last_switch_time < self.switch_cooldown:
            return False

        # The target must be in the detected list.
        if not any(cam['id'] == camera_id for cam in self.available_cameras):
            print(f"摄像头 {camera_id} 不可用")
            return False

        # Already active and open: nothing to do.
        if camera_id == self.current_camera_id and self.current_cap is not None:
            return True

        # Release the current capture before opening the new one.
        if self.current_cap is not None:
            self.current_cap.release()
            self.current_cap = None

        # Open the new camera with the configured backend.
        new_cap = self._open_capture(camera_id)
        if new_cap.isOpened():
            # Set MJPG first; some USB cameras only reach 720p/1080p with it.
            try:
                new_cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"MJPG"))
            except Exception:
                pass
            # Request the preferred resolution.
            if self.preferred_width and self.preferred_height:
                new_cap.set(cv2.CAP_PROP_FRAME_WIDTH, int(self.preferred_width))
                new_cap.set(cv2.CAP_PROP_FRAME_HEIGHT, int(self.preferred_height))
            time.sleep(0.05)  # give the driver a moment to apply the settings
            ret, _ = new_cap.read()
            if ret:
                # Record the resolution that actually took effect.
                w = int(new_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                h = int(new_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                fps = new_cap.get(cv2.CAP_PROP_FPS)
                self.current_cap = new_cap
                self.current_camera_id = camera_id
                self.last_switch_time = current_time
                # Refresh the display name in the camera list.
                for cam in self.available_cameras:
                    if cam['id'] == camera_id:
                        cam['width'] = w
                        cam['height'] = h
                        cam['fps'] = fps
                        cam['name'] = f"摄像头 {camera_id} ({w}x{h})"
                        break
                print(f"已切换到 摄像头 {camera_id} ({w}x{h} @ {fps:.1f}fps)")
                return True
            new_cap.release()

        print(f"切换到摄像头 {camera_id} 失败")
        return False

    def _neighbor_camera_id(self, offset: int) -> Optional[int]:
        """Return the camera id *offset* positions away in the detected list."""
        if len(self.available_cameras) <= 1:
            return None
        # Locate the active camera in the list (-1 when not found, matching
        # the original behavior of wrapping from index -1).
        current_index = -1
        for i, camera in enumerate(self.available_cameras):
            if camera['id'] == self.current_camera_id:
                current_index = i
                break
        target = (current_index + offset) % len(self.available_cameras)
        return self.available_cameras[target]['id']

    def switch_to_next_camera(self) -> bool:
        """Switch to the next camera in the detected list."""
        target_id = self._neighbor_camera_id(1)
        return False if target_id is None else self.switch_to_camera(target_id)

    def switch_to_previous_camera(self) -> bool:
        """Switch to the previous camera in the detected list."""
        target_id = self._neighbor_camera_id(-1)
        return False if target_id is None else self.switch_to_camera(target_id)

    def get_current_capture(self) -> Optional[cv2.VideoCapture]:
        """Return the active capture, lazily opening the current camera."""
        if self.current_cap is None and self.available_cameras:
            # Try to (re)initialize the current camera.
            self.switch_to_camera(self.current_camera_id)
        return self.current_cap

    def release(self):
        """Release the active capture, if any."""
        if self.current_cap is not None:
            self.current_cap.release()
            self.current_cap = None


def display_camera_info(frame: np.ndarray, camera_switcher: CameraSwitcher) -> np.ndarray:
    """
    Overlay camera status and switching hints on the frame.

    Args:
        frame: source image.
        camera_switcher: camera switching manager.

    Returns:
        Image with the overlay applied.
    """
    vis = frame.copy()
    height = vis.shape[0]

    current_info = camera_switcher.get_current_camera_info()
    cameras = camera_switcher.get_available_cameras()

    if current_info:
        # Active camera name (bottom-left corner).
        vis = put_label_box(vis, f"当前: {current_info['name']}", (10, height - 80), 20, (255, 255, 255), (0, 50, 0))
        # Number of usable cameras.
        vis = put_label_box(vis, f"可用摄像头: {len(cameras)}", (10, height - 50), 18, (255, 255, 255), (50, 50, 50))

    # Switching hint — only relevant when there is more than one camera.
    if len(cameras) > 1:
        vis = put_label_box(vis, "按键: 1-9切换 | Tab下一个 | Shift+Tab上一个", (10, height - 20), 16, (200, 200, 200), (30, 30, 30))

    return vis


    """
    判断是否为右手
    
    Args:
        keypoints: 手部关键点坐标 (21, 3)，格式为 [x, y, confidence]
    
    Returns:
        True表示右手，False表示左手
    """
    if len(keypoints) < 21:
        return False
    
    # 通过拇指和小指的相对位置判断
    thumb_tip = keypoints[4][:2]  # 拇指指尖
    wrist = keypoints[0][:2]  # 手腕
    
    # 计算手掌中心线向量（从手腕到中指根部）
    middle_mcp = keypoints[9][:2]  # 中指根部
    palm_vector = middle_mcp - wrist
    
    # 计算拇指相对于手掌中心线的位置
    thumb_vector = thumb_tip - wrist
    cross_product = np.cross(palm_vector, thumb_vector)
    
    # 根据叉积判断左右手，右手的叉积为正
    return cross_product > 0


def is_right_hand(keypoints: np.ndarray) -> bool:
    """
    Classify a detected hand as right or left.

    Args:
        keypoints: 21 hand keypoints shaped (21, 3) as [x, y, confidence].

    Returns:
        True for a right hand, False for a left hand (or too few keypoints).
    """
    if len(keypoints) < 21:
        return False

    wrist = keypoints[0][:2]        # wrist
    thumb_tip = keypoints[4][:2]    # thumb tip
    middle_mcp = keypoints[9][:2]   # middle-finger MCP joint

    # Orientation of the thumb relative to the palm axis (wrist -> middle MCP):
    # the sign of the 2D cross product distinguishes the two hands
    # (positive for the right hand).
    palm_axis = middle_mcp - wrist
    thumb_dir = thumb_tip - wrist
    return np.cross(palm_axis, thumb_dir) > 0


def is_palm_front_facing(keypoints: np.ndarray) -> bool:
    """
    Decide whether the palm faces the camera.

    Args:
        keypoints: 21 hand keypoints shaped (21, 3) as [x, y, confidence].

    Returns:
        True when the palm appears front-facing, False for back/side views.
    """
    if len(keypoints) < 21:
        return False

    conf_thr = 0.3  # minimum confidence for a keypoint to count as visible

    # 1. Fingertips tend to be detected more reliably on a front-facing palm.
    tip_ids = (4, 8, 12, 16, 20)  # thumb/index/middle/ring/pinky tips
    tips_visible = sum(1 for i in tip_ids if keypoints[i][2] > conf_thr)

    # 2. Same idea for the finger base joints.
    base_ids = (1, 5, 9, 13, 17)
    bases_visible = sum(1 for i in base_ids if keypoints[i][2] > conf_thr)

    try:
        wrist = keypoints[0][:2]
        middle_mcp = keypoints[9][:2]
        index_mcp = keypoints[5][:2]

        # 3. z-component of the palm-plane normal via a 2D cross product.
        normal_z = np.cross(middle_mcp - wrist, index_mcp - wrist)

        # 4. Count well-separated adjacent fingertip pairs (a front-facing
        # palm tends to have the fingers spread out).
        spread = 0
        tips = [keypoints[i][:2] for i in (8, 12, 16, 20) if keypoints[i][2] > conf_thr]
        if len(tips) >= 3:
            for tip_a, tip_b in zip(tips, tips[1:]):
                vec_a = tip_a - wrist
                vec_b = tip_b - wrist
                cos_a = np.dot(vec_a, vec_b) / (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))
                # Wider than 30 degrees counts as spread.
                if np.arccos(np.clip(cos_a, -1, 1)) > np.pi / 6:
                    spread += 1

        # 5. Weighted vote over all cues; a score >= 3 counts as front-facing.
        score = spread
        if tips_visible >= 4:
            score += 2
        elif tips_visible >= 3:
            score += 1
        if bases_visible >= 4:
            score += 1
        if normal_z > 0:
            score += 1
        return score >= 3

    except Exception:
        # Any numerical failure is treated as "not front-facing".
        return False


def calculate_finger_angles(keypoints: np.ndarray) -> List[float]:
    """
    Compute the spread angles between adjacent fingers of a front-facing
    right hand.

    Args:
        keypoints: 21 hand keypoints shaped (21, 3) as [x, y, confidence].

    Returns:
        Angles in degrees between adjacent fingertips (4 values); empty when
        the keypoints are incomplete, not a right hand, or not front-facing.
    """
    # Guard clauses: incomplete data, left hand, or a non-frontal palm all
    # yield an empty result.
    if len(keypoints) < 21:
        return []
    if not is_right_hand(keypoints):
        return []
    if not is_palm_front_facing(keypoints):
        return []

    # Fingertip indices in thumb -> pinky order.
    tip_ids = [4, 8, 12, 16, 20]
    # Approximate palm anchor (the wrist keypoint).
    palm_center = keypoints[0]

    angles: List[float] = []
    for tip_a, tip_b in zip(tip_ids, tip_ids[1:]):
        vec_a = keypoints[tip_a][:2] - palm_center[:2]
        vec_b = keypoints[tip_b][:2] - palm_center[:2]
        norm_a = np.linalg.norm(vec_a)
        norm_b = np.linalg.norm(vec_b)

        if norm_a > 0 and norm_b > 0:
            # Angle between the two fingertip rays, in degrees.
            cos_theta = np.clip(np.dot(vec_a, vec_b) / (norm_a * norm_b), -1, 1)
            angles.append(np.arccos(cos_theta) * 180 / np.pi)
        else:
            # Degenerate ray (fingertip coincides with the wrist).
            angles.append(0)

    return angles


def calculate_palm_openness(angles: List[float], min_angle: float = 10.0, max_angle: float = 20.0) -> float:
    """
    Map the average inter-finger angle to an openness percentage (0-100%).

    The defaults (10-20 degrees) are the empirically tuned calibration range;
    they are now parameters so different hands/cameras can be recalibrated
    without editing this function. (The original comment claimed a 0-90 degree
    range while the code used 10-20 — the documentation now matches the code.)

    Args:
        angles: angles (degrees) between adjacent fingers.
        min_angle: average angle mapped to 0% openness.
        max_angle: average angle mapped to 100% openness.

    Returns:
        Palm openness percentage, clamped to [0, 100]; 0.0 for an empty list.
    """
    if not angles:
        return 0.0

    # Average spread across all finger pairs.
    avg_angle = sum(angles) / len(angles)

    # Linearly normalize into [0, 100] relative to the calibration range.
    openness = ((avg_angle - min_angle) / (max_angle - min_angle)) * 100

    # Clamp to the valid percentage range.
    return max(0.0, min(100.0, openness))


def put_chinese_text(img, text, position, font_size=30, color=(255, 255, 255)):
    """
    Draw Chinese text on an OpenCV image (OpenCV itself cannot render CJK).

    Args:
        img: OpenCV (BGR) image.
        text: text to draw (may contain CJK characters).
        position: top-left text position (x, y).
        font_size: font size in points.
        color: text color as (B, G, R).

    Returns:
        A new image with the text rendered.
    """
    # Round-trip through PIL, which supports TrueType CJK fonts.
    img_pil = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(img_pil)

    # Pick the first available Windows CJK font; fall back to PIL's default.
    try:
        font_path = "C:/Windows/Fonts/simhei.ttf"  # SimHei
        if not os.path.exists(font_path):
            font_path = "C:/Windows/Fonts/msyh.ttc"  # Microsoft YaHei
        if not os.path.exists(font_path):
            font_path = "C:/Windows/Fonts/simsun.ttc"  # SimSun

        if os.path.exists(font_path):
            font = ImageFont.truetype(font_path, font_size)
        else:
            font = ImageFont.load_default()
    except Exception:  # narrowed from bare 'except:' so KeyboardInterrupt propagates
        font = ImageFont.load_default()

    # Bug fix: PIL expects RGB, so swap the BGR channel order — previously
    # the BGR tuple was passed through unchanged, swapping red and blue
    # (inconsistent with put_label_box, which already converts).
    draw.text(position, text, font=font, fill=(color[2], color[1], color[0]))

    # Convert back to OpenCV's BGR layout.
    return cv2.cvtColor(np.array(img_pil), cv2.COLOR_RGB2BGR)


def put_label_box(img, text, position, font_size=22, text_color=(255, 255, 255), box_color=(0, 0, 0)):
    """
    Draw Chinese text with a filled background box to improve readability
    and avoid visual overlap with other on-screen content.

    Args:
        img: OpenCV (BGR) image.
        text: text content.
        position: top-left corner (x, y).
        font_size: font size in points.
        text_color: text color (B, G, R).
        box_color: background box color (B, G, R).

    Returns:
        A new image with the label drawn.
    """
    img_pil = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(img_pil)

    # Pick the first available Windows CJK font, falling back to the default.
    try:
        font_path = "C:/Windows/Fonts/simhei.ttf"
        if not os.path.exists(font_path):
            font_path = "C:/Windows/Fonts/msyh.ttc"
        if not os.path.exists(font_path):
            font_path = "C:/Windows/Fonts/simsun.ttc"
        font = ImageFont.truetype(font_path, font_size) if os.path.exists(font_path) else ImageFont.load_default()
    except Exception:  # narrowed from bare 'except:' so KeyboardInterrupt propagates
        font = ImageFont.load_default()

    # Measure the text to size the background box.
    x, y = position
    try:
        # Pillow >= 8: textbbox gives exact bounds.
        left, top, right, bottom = draw.textbbox((x, y), text, font=font)
    except Exception:
        # Older Pillow fallback. Guard font.getsize: it was removed in
        # Pillow 10 (where textbbox always exists, but stay defensive).
        if hasattr(font, "getsize"):
            w, h = font.getsize(text)
        else:
            w, h = font_size * len(text), font_size
        left, top, right, bottom = x, y, x + w + 6, y + h + 6

    # PIL expects RGB, so swap the BGR channel order for both fills.
    pad = 6
    draw.rectangle([(left - pad, top - pad), (right + pad, bottom + pad)], fill=(box_color[2], box_color[1], box_color[0]))
    draw.text((x, y), text, font=font, fill=(text_color[2], text_color[1], text_color[0]))

    return cv2.cvtColor(np.array(img_pil), cv2.COLOR_RGB2BGR)

def calculate_joint_angle(a: np.ndarray, b: np.ndarray, c: np.ndarray) -> Optional[float]:
    """
    Angle (degrees) at vertex *b* between rays b->a and b->c.

    Args:
        a, b, c: 2D point coordinates (x, y).

    Returns:
        The angle in degrees, or None for degenerate/invalid input.
    """
    try:
        ray_ba = a - b
        ray_bc = c - b
        len_ba = np.linalg.norm(ray_ba)
        len_bc = np.linalg.norm(ray_bc)
        # Coincident points leave the angle undefined.
        if len_ba < 1e-6 or len_bc < 1e-6:
            return None
        cosine = np.clip(np.dot(ray_ba, ray_bc) / (len_ba * len_bc), -1.0, 1.0)
        return float(np.degrees(np.arccos(cosine)))
    except Exception:
        return None


def _get_torso_anchor(pose_keypoints: np.ndarray, conf_thr: float = 0.3) -> Optional[np.ndarray]:
    """
    选择用于肩角计算的躯干锚点：优先右髋，其次左右髋的中点；再退化为颈部近似（左右肩中点）；最后使用图像垂直向上的虚拟点。
    Returns: (x, y) 或 None
    """
    if pose_keypoints is None or len(pose_keypoints) < 17:
        return None
    L_HIP, R_HIP, L_SHO, R_SHO = 11, 12, 5, 6
    # 1) 右髋
    if pose_keypoints[R_HIP][2] >= conf_thr:
        return pose_keypoints[R_HIP][:2]
    # 2) 左髋
    if pose_keypoints[L_HIP][2] >= conf_thr:
        return pose_keypoints[L_HIP][:2]
    # 3) 髋中心（如果至少有一个髋置信度较低但肩置信度较高，仍尝试）
    hips = []
    if pose_keypoints[L_HIP][2] >= 0.15:
        hips.append(pose_keypoints[L_HIP][:2])
    if pose_keypoints[R_HIP][2] >= 0.15:
        hips.append(pose_keypoints[R_HIP][:2])
    if len(hips) == 1:
        return hips[0]
    if len(hips) == 2:
        return (hips[0] + hips[1]) / 2.0
    # 4) 颈部近似（左右肩的中点）
    if pose_keypoints[L_SHO][2] >= conf_thr and pose_keypoints[R_SHO][2] >= conf_thr:
        return (pose_keypoints[L_SHO][:2] + pose_keypoints[R_SHO][:2]) / 2.0
    return None


def compute_right_arm_angles(pose_keypoints: np.ndarray, hand_keypoints: Optional[np.ndarray], conf_thr: float = 0.3) -> Tuple[Optional[float], Optional[float], Optional[float]]:
    """
    Compute the three right-arm joint angles: shoulder, elbow and wrist
    (with a torso-anchor fallback chain for the shoulder).

    Args:
        pose_keypoints: 17 body keypoints shaped (17, 3).
        hand_keypoints: 21 hand keypoints shaped (21, 3), or None.
        conf_thr: minimum keypoint confidence.

    Returns:
        (shoulder_angle, elbow_angle, wrist_angle) in degrees; any entry may
        be None when its keypoints are missing or unreliable.
    """
    if pose_keypoints is None or len(pose_keypoints) < 17:
        return None, None, None

    # Right-side COCO keypoint indices.
    R_SHO, R_ELB, R_WRI = 6, 8, 10

    def confident(idx: int) -> bool:
        return pose_keypoints[idx][2] >= conf_thr

    # Shoulder: angle at the shoulder between the torso anchor and the elbow.
    shoulder_angle = None
    if confident(R_SHO) and confident(R_ELB):
        shoulder = pose_keypoints[R_SHO][:2]
        elbow = pose_keypoints[R_ELB][:2]
        anchor = _get_torso_anchor(pose_keypoints, conf_thr)
        if anchor is None:
            # Last resort: a virtual point 100 px straight up stands in for
            # the torso direction.
            anchor = np.array([shoulder[0], shoulder[1] - 100.0])
        shoulder_angle = calculate_joint_angle(anchor, shoulder, elbow)

    # Elbow: angle between upper arm (elbow->shoulder) and forearm (elbow->wrist).
    elbow_angle = None
    if confident(R_SHO) and confident(R_ELB) and confident(R_WRI):
        elbow_angle = calculate_joint_angle(pose_keypoints[R_SHO][:2],
                                            pose_keypoints[R_ELB][:2],
                                            pose_keypoints[R_WRI][:2])

    # Wrist: angle between forearm (wrist->elbow) and palm axis
    # (wrist->middle-finger MCP); requires hand keypoints.
    wrist_angle = None
    if hand_keypoints is not None and len(hand_keypoints) >= 21:
        if (hand_keypoints[0][2] >= conf_thr and hand_keypoints[9][2] >= conf_thr
                and confident(R_ELB)):
            wrist_angle = calculate_joint_angle(pose_keypoints[R_ELB][:2],
                                                hand_keypoints[0][:2],
                                                hand_keypoints[9][:2])

    return shoulder_angle, elbow_angle, wrist_angle


def visualize_arm_angles(frame: np.ndarray, shoulder: Optional[float], elbow: Optional[float], wrist: Optional[float]) -> np.ndarray:
    """
    Overlay the right-arm joint angles (shoulder/elbow/wrist) on the frame.
    """
    vis = frame.copy()
    width = vis.shape[1]
    # Anchor at the top-right so the text doesn't collide with the hand
    # info drawn on the left side.
    x0 = max(10, width - 300)
    y0 = 30
    line_h = 32

    # Chinese labels rendered with background boxes for readability.
    labels = [
        f"右肩角度: {shoulder:.1f}°" if shoulder is not None else "右肩角度: N/A",
        f"右肘角度: {elbow:.1f}°" if elbow is not None else "右肘角度: N/A",
        f"右腕角度: {wrist:.1f}°" if wrist is not None else "右腕角度: N/A",
    ]
    for row, label in enumerate(labels):
        vis = put_label_box(vis, label, (x0, y0 + line_h * row), 22, (255, 255, 255), (40, 40, 40))
    return vis


def draw_right_arm_skeleton(frame: np.ndarray, pose_keypoints: Optional[np.ndarray], hand_keypoints: Optional[np.ndarray], conf_thr: float = 0.3) -> np.ndarray:
    """
    Draw the right-arm skeleton: shoulder-elbow-wrist segments, the
    shoulder-hip torso line, and (when hand keypoints are available) the
    wrist-to-middle-MCP palm direction.

    Args:
        frame: source image.
        pose_keypoints: 17 body keypoints shaped (17, 3), or None.
        hand_keypoints: 21 hand keypoints shaped (21, 3), or None.
        conf_thr: minimum keypoint confidence for drawing.

    Returns:
        Image with the skeleton drawn.
    """
    vis = frame.copy()
    if pose_keypoints is None or len(pose_keypoints) < 17:
        return vis

    R_SHO, R_ELB, R_WRI, R_HIP = 6, 8, 10, 12

    def visible(idx: int) -> bool:
        return pose_keypoints[idx][2] >= conf_thr

    def pt(idx: int):
        return int(pose_keypoints[idx][0]), int(pose_keypoints[idx][1])

    # Joint markers — one (radius, color) per joint.
    joint_styles = [
        (R_SHO, 6, (255, 200, 0)),
        (R_ELB, 6, (0, 200, 255)),
        (R_WRI, 6, (0, 255, 0)),
        (R_HIP, 7, (0, 0, 255)),  # right hip
    ]
    for idx, radius, color in joint_styles:
        if visible(idx):
            cv2.circle(vis, pt(idx), radius, color, -1)

    # Limb segments: upper arm, forearm, and the right torso line.
    segments = [
        (R_SHO, R_ELB, (255, 128, 0)),
        (R_ELB, R_WRI, (0, 255, 128)),
        (R_SHO, R_HIP, (128, 0, 255)),
    ]
    for idx_a, idx_b, color in segments:
        if visible(idx_a) and visible(idx_b):
            cv2.line(vis, pt(idx_a), pt(idx_b), color, 3)

    # Palm direction: wrist -> middle-finger MCP from the hand model.
    if hand_keypoints is not None and len(hand_keypoints) >= 21:
        if hand_keypoints[0][2] >= conf_thr and hand_keypoints[9][2] >= conf_thr:
            wrist_pt = (int(hand_keypoints[0][0]), int(hand_keypoints[0][1]))
            mcp_pt = (int(hand_keypoints[9][0]), int(hand_keypoints[9][1]))
            cv2.circle(vis, mcp_pt, 6, (255, 0, 255), -1)
            cv2.line(vis, wrist_pt, mcp_pt, (255, 0, 255), 3)

    return vis

def _draw_hand_keypoints(vis_frame: np.ndarray, keypoints: np.ndarray) -> None:
    """Draw keypoint dots (with indices) and finger bones in place."""
    # Keypoint dots with their indices for debugging.
    for j, (x, y, conf) in enumerate(keypoints):
        if conf > 0.5:  # only draw confident points
            cv2.circle(vis_frame, (int(x), int(y)), 5, (0, 255, 0), -1)
            cv2.putText(vis_frame, str(j), (int(x) + 10, int(y) - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)

    # Finger bone chains (tip -> base), one color per finger.
    finger_chains = [
        [4, 3, 2, 1, 0],      # thumb
        [8, 7, 6, 5, 0],      # index
        [12, 11, 10, 9, 0],   # middle
        [16, 15, 14, 13, 0],  # ring
        [20, 19, 18, 17, 0],  # pinky
    ]
    colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255)]
    for chain, color in zip(finger_chains, colors):
        for idx_a, idx_b in zip(chain, chain[1:]):
            x1, y1, c1 = keypoints[idx_a]
            x2, y2, c2 = keypoints[idx_b]
            if c1 > 0.5 and c2 > 0.5:
                cv2.line(vis_frame, (int(x1), int(y1)), (int(x2), int(y2)), color, 2)


def visualize_hand_detection(frame: np.ndarray, results, angles: List[float], openness: float) -> np.ndarray:
    """
    Visualize the first detected right hand (keypoints + finger skeleton)
    and the angle/openness status text on the frame.

    Args:
        frame: source image.
        results: YOLO hand-keypoint inference results.
        angles: inter-finger angles in degrees.
        openness: palm openness percentage.

    Returns:
        Annotated image (the input frame is not modified).
    """
    vis_frame = frame.copy()

    # Detection state for the status text below.
    hand_detected = False
    front_facing = False

    if results[0].keypoints is not None and len(results[0].keypoints.data) > 0:
        # Single pass over the detections: find the first right hand, draw
        # it and record its state. (The original code iterated the detection
        # list twice — once to draw, once to recompute the same flags.)
        for keypoints in results[0].keypoints.data:
            keypoints = keypoints.cpu().numpy()
            if len(keypoints) >= 21 and is_right_hand(keypoints):
                hand_detected = True
                front_facing = is_palm_front_facing(keypoints)
                _draw_hand_keypoints(vis_frame, keypoints)
                # Only the first detected right hand is processed.
                break

    if hand_detected and front_facing:
        vis_frame = put_chinese_text(vis_frame, f'右手张开度: {openness:.1f}%', (10, 30), 25, (0, 255, 255))
        vis_frame = put_chinese_text(vis_frame, '检测到: 右手正面', (10, 70), 25, (0, 255, 0))

        # Per-pair finger angles, listed below the status lines.
        finger_names = ['拇指-食指', '食指-中指', '中指-无名指', '无名指-小指']
        for i, (name, angle) in enumerate(zip(finger_names, angles)):
            vis_frame = put_chinese_text(vis_frame, f'{name}: {angle:.1f}°',
                                         (10, 110 + i * 40), 20, (255, 255, 255))
    elif hand_detected and not front_facing:
        vis_frame = put_chinese_text(vis_frame, '检测到右手，但非正面', (10, 30), 25, (255, 165, 0))
        vis_frame = put_chinese_text(vis_frame, '请将右手正面朝向摄像头', (10, 70), 20, (255, 255, 255))
    else:
        vis_frame = put_chinese_text(vis_frame, '未检测到右手', (10, 30), 25, (0, 0, 255))
        vis_frame = put_chinese_text(vis_frame, '请将右手正面放在摄像头前', (10, 70), 20, (255, 255, 255))

    return vis_frame


def process_image(hand_model: YOLO, pose_model: YOLO, image_path: str, save_output: bool = False) -> None:
    """
    Process a single image: detect the right hand (front-facing only) plus
    the right-arm joint angles, then display and optionally save the result.

    Args:
        hand_model: YOLO hand-keypoint model.
        pose_model: YOLO body-pose model.
        image_path: path of the image to process.
        save_output: whether to save the annotated image.
    """
    frame = cv2.imread(image_path)
    if frame is None:
        print(f"无法加载图像: {image_path}")
        return

    # Run both detectors on the frame.
    hand_results = hand_model(frame)
    pose_results = pose_model(frame)

    # Right-hand analysis (only a front-facing right hand is processed).
    angles = []
    openness = 0.0
    right_hand_kp = None
    if hand_results[0].keypoints is not None and len(hand_results[0].keypoints.data) > 0:
        # Use the first detection that is a front-facing right hand.
        for keypoints in hand_results[0].keypoints.data:
            keypoints = keypoints.cpu().numpy()

            if len(keypoints) >= 21 and is_right_hand(keypoints) and is_palm_front_facing(keypoints):
                angles = calculate_finger_angles(keypoints)
                openness = calculate_palm_openness(angles)
                # Kept for the wrist-angle computation below.
                right_hand_kp = keypoints
                print(f"右手正面张开度: {openness:.1f}%")
                for i, angle in enumerate(angles):
                    print(f"手指间角度 {i+1}: {angle:.1f}°")
                break

        if not angles:
            print("未检测到右手正面")

    # Right-arm joint angles (shoulder, elbow, wrist).
    shoulder_deg, elbow_deg, wrist_deg = None, None, None
    # Explicitly initialized; the original probed `'pose_kp' in locals()`
    # at the visualization step instead.
    pose_kp = None
    if pose_results[0].keypoints is not None and len(pose_results[0].keypoints.data) > 0:
        # Prefer the pose whose right wrist is closest to the detected hand.
        selected_pose = None
        if right_hand_kp is not None:
            wrist_xy = right_hand_kp[0][:2]
            min_dist = 1e9
            for kp in pose_results[0].keypoints.data:
                kp = kp.cpu().numpy()
                if len(kp) >= 17 and kp[10][2] >= 0.2:  # right-wrist confidence
                    d = np.linalg.norm(kp[10][:2] - wrist_xy)
                    if d < min_dist:
                        min_dist = d
                        selected_pose = kp
        if selected_pose is None:
            # Fall back to the first detected pose.
            selected_pose = pose_results[0].keypoints.data[0].cpu().numpy()
        pose_kp = selected_pose
        shoulder_deg, elbow_deg, wrist_deg = compute_right_arm_angles(pose_kp, right_hand_kp)
        # Report the angles to the console.
        print("右臂角度：")
        print(f" - 肩关节: {shoulder_deg:.1f}°" if shoulder_deg is not None else " - 肩关节: N/A")
        print(f" - 肘关节: {elbow_deg:.1f}°" if elbow_deg is not None else " - 肘关节: N/A")
        print(f" - 腕关节: {wrist_deg:.1f}°" if wrist_deg is not None else " - 腕关节: N/A")

    # Visualization: hand overlay + right-arm skeleton + angle HUD.
    vis_frame = visualize_hand_detection(frame, hand_results, angles, openness)
    vis_frame = draw_right_arm_skeleton(vis_frame, pose_kp, right_hand_kp)
    vis_frame = visualize_arm_angles(vis_frame, shoulder_deg, elbow_deg, wrist_deg)

    cv2.imshow('右手检测 + 右臂角度', vis_frame)

    if save_output:
        # os.path.basename handles both '/' and Windows '\\' separators
        # (the original split('/') produced a broken name for Windows paths).
        output_path = '右手与右臂角度_' + os.path.basename(image_path)
        cv2.imwrite(output_path, vis_frame)
        print(f"结果已保存至: {output_path}")

    cv2.waitKey(0)
    cv2.destroyAllWindows()


def process_video(hand_model: YOLO, pose_model: YOLO, video_source: int = 0, save_output: bool = False, controller: Optional[object] = None, preferred_width: int = 1280, preferred_height: int = 720, cam_backend: str = "default", use_ttk: bool = False) -> None:
    """
    Process a video stream / camera feed (front-facing right hand only) and
    optionally send arm-control commands.

    Args:
        hand_model: YOLO hand-keypoint model.
        pose_model: YOLO human-pose model (used for right-arm angles).
        video_source: video source index (0 = default camera).
        save_output: whether to save the annotated output video.
        controller: arm controller; pass None to disable control.
        preferred_width: requested capture width in pixels.
        preferred_height: requested capture height in pixels.
        cam_backend: camera backend: 'default' | 'dshow' | 'msmf'.
        use_ttk: if True, show a ttkbootstrap GUI instead of an OpenCV window.
    """
    # Initialize the camera switcher
    camera_switcher = CameraSwitcher(preferred_width=preferred_width, preferred_height=preferred_height, backend=cam_backend)
    
    # Try to switch to the requested camera
    if not camera_switcher.switch_to_camera(video_source):
        print(f"无法打开摄像头 {video_source}")
        return
    
    cap = camera_switcher.get_current_capture()
    if cap is None:
        print("无法获取摄像头")
        return
    
    # Set up video recording
    out = None
    if save_output:
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        # NOTE(review): some drivers report 0 fps — confirm before relying on playback speed
        fps = cap.get(cv2.CAP_PROP_FPS)
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('右手正面检测输出.avi', fourcc, fps, (width, height))
    
    print("按 'q' 键退出程序")
    print("程序将只识别和显示右手正面")
    print("摄像头切换: 1-9数字键切换 | Tab下一个 | n下一个 | p上一个 | c显示列表 | r复位 | 鼠标点击右上角开关启用/禁用发送")
    
    # Initialize HUD/UI state and window
    ui_state = UIState(control_enabled=True)

    # Optionally initialize the ttkbootstrap GUI
    ttk_panel = None
    if use_ttk:
        try:
            from .gui_ttk import TTKPanel  # if the package-relative import fails, fall back to a plain import
        except Exception:
            from gui_ttk import TTKPanel
        ttk_panel = TTKPanel(title="右手检测与臂控", theme="darkly")
        if controller is not None:
            try:
                ttk_panel.apply_controller_defaults(controller)
            except Exception:
                pass
    # ASCII window title avoids garbled Chinese on some OpenCV builds
    window_name = 'Hand + Arm'
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    cv2.setMouseCallback(window_name, mouse_handler, ui_state)
    
    frame_idx = 0
    while True:
        # Fetch the currently selected camera
        cap = camera_switcher.get_current_capture()
        if cap is None:
            print("摄像头连接丢失")
            break
            
        # Read one frame
        ret, frame = cap.read()
        if not ret:
            print("无法读取摄像头画面")
            time.sleep(0.1)
            continue
        
        # Run detection
        hand_results = hand_model(frame)
        pose_results = pose_model(frame)
        
        # Per-frame detection state (front-facing right hand only)
        angles = []
        openness = 0.0
        right_hand_kp = None
        # BUGFIX: reset per frame. Previously pose_kp persisted across loop
        # iterations ('pose_kp' in locals()), so a stale skeleton from an
        # earlier frame was drawn when the current frame had no pose.
        pose_kp = None
        
        if hand_results[0].keypoints is not None and len(hand_results[0].keypoints.data) > 0:
            # Use the first detected front-facing right hand
            for keypoints in hand_results[0].keypoints.data:
                keypoints = keypoints.cpu().numpy()
                
                if len(keypoints) >= 21 and is_right_hand(keypoints) and is_palm_front_facing(keypoints):
                    # Finger angles
                    angles = calculate_finger_angles(keypoints)
                    
                    # Palm openness
                    openness = calculate_palm_openness(angles)
                    # Keep the hand keypoints for the wrist-angle computation
                    right_hand_kp = keypoints
                    break

        # Right-arm angles (shoulder, elbow, wrist)
        shoulder_deg, elbow_deg, wrist_deg = None, None, None
        if pose_results[0].keypoints is not None and len(pose_results[0].keypoints.data) > 0:
            # Prefer the pose whose right wrist is closest to the detected hand wrist
            selected_pose = None
            if right_hand_kp is not None:
                wrist_xy = right_hand_kp[0][:2]
                min_dist = 1e9
                for kp in pose_results[0].keypoints.data:
                    kp = kp.cpu().numpy()
                    if len(kp) >= 17 and kp[10][2] >= 0.2:  # right-wrist confidence
                        d = np.linalg.norm(kp[10][:2] - wrist_xy)
                        if d < min_dist:
                            min_dist = d
                            selected_pose = kp
            if selected_pose is None:
                selected_pose = pose_results[0].keypoints.data[0].cpu().numpy()
            pose_kp = selected_pose
            shoulder_deg, elbow_deg, wrist_deg = compute_right_arm_angles(pose_kp, right_hand_kp)
            # Log angles every 10 pose-bearing frames to limit console spam
            frame_idx += 1
            if frame_idx % 10 == 0:
                s_txt = f"{shoulder_deg:.1f}°" if shoulder_deg is not None else "N/A"
                e_txt = f"{elbow_deg:.1f}°" if elbow_deg is not None else "N/A"
                w_txt = f"{wrist_deg:.1f}°" if wrist_deg is not None else "N/A"
                print(f"右臂角度 -> 肩:{s_txt} 肘:{e_txt} 腕:{w_txt}")

        # Send control commands (WebSocket), gated by the UI toggle
        if controller is not None and ui_state.control_enabled:
            try:
                if use_ttk and ttk_panel is not None:
                    # Sync blocked nodes and pending reset requests from the GUI
                    controller.block_nodes = ttk_panel.get_block_nodes()
                    if ttk_panel.consume_reset_request():
                        controller.reset_state()
                        controller.send_reset()
                controller.update_and_send(openness, shoulder_deg, elbow_deg, wrist_deg)
            except Exception as e:
                print(f"控制发送异常: {e}")

        # Visualize results (hand + right-arm skeleton + angles)
        vis_frame = visualize_hand_detection(frame, hand_results, angles, openness)
        vis_frame = draw_right_arm_skeleton(vis_frame, pose_kp, right_hand_kp)
        vis_frame = visualize_arm_angles(vis_frame, shoulder_deg, elbow_deg, wrist_deg)
        # BUGFIX: record the camera-sized frame. draw_right_panel below widens
        # the image, and cv2.VideoWriter silently drops frames whose size
        # differs from the one it was opened with.
        record_frame = vis_frame
        
        # Draw/display according to the GUI mode
        if use_ttk and ttk_panel is not None:
            # Show the composited image on the left of the Tk window (no extra side panel)
            vis_frame_tk = draw_hud(vis_frame, camera_switcher, ui_state, controller)
            ttk_panel.update_frame(vis_frame_tk)
            ttk_panel.update_info(shoulder_deg, elbow_deg, wrist_deg, openness)
            ttk_panel.update_logs(getattr(controller, 'logs', []))
            # Sync filter parameters (sliders/switches)
            if controller is not None:
                try:
                    params = ttk_panel.get_filter_params()
                    controller.deadband_pct = float(params.get('deadband_pct', controller.deadband_pct))
                    controller.max_step_pct = float(params.get('max_step_pct', controller.max_step_pct))
                    controller.smoothing_alpha = float(params.get('smoothing_alpha', controller.smoothing_alpha))
                    controller.rel_threshold_pct = float(params.get('rel_threshold_pct', controller.rel_threshold_pct))
                    controller.rel_step_pct = float(params.get('rel_step_pct', controller.rel_step_pct))
                    safe = ttk_panel.get_safe_params()
                    controller.safe_center_pct = float(safe.get('center_pct', controller.safe_center_pct))
                    controller.safe_shrink_pct = float(safe.get('shrink_pct', controller.safe_shrink_pct))
                    limits = ttk_panel.get_limits_params()
                    for k, (mn, mx) in limits.items():
                        mn = float(max(0.0, min(100.0, mn)))
                        mx = float(max(0.0, min(100.0, mx)))
                        if mn > mx:
                            mn, mx = mx, mn
                        controller.limits_pct[k] = [mn, mx]
                except Exception:
                    pass
            # Sync the control toggle state
            ui_state.control_enabled = ttk_panel.get_control_enabled()
        else:
            # Right-side info panel (angles/openness/logs)
            vis_frame = draw_right_panel(vis_frame, controller, shoulder_deg, elbow_deg, wrist_deg, openness)
            # Draw the HUD (camera info + control toggle)
            vis_frame = draw_hud(vis_frame, camera_switcher, ui_state, controller)
            # Show the result (OpenCV window)
            cv2.imshow(window_name, vis_frame)
        
        # Save output
        if out is not None:
            out.write(record_frame)
        
        # Event handling
        if use_ttk and ttk_panel is not None:
            ttk_panel.tick()
            key = 0
        else:
            key = cv2.waitKey(1) & 0xFF
        
        if key == ord('q') or key == 27:  # 'q' or ESC quits
            break
        elif key != 255:  # some key was pressed
            handled = handle_camera_switch_keys(key, camera_switcher)
            if not handled:
                # Other key handling
                if key == ord('s'):
                    # Screenshot
                    timestamp = time.strftime("%Y%m%d_%H%M%S")
                    screenshot_path = f"screenshot_{timestamp}.jpg"
                    cv2.imwrite(screenshot_path, vis_frame)
                    print(f"截图已保存: {screenshot_path}")
                elif key == ord('r') and controller is not None:
                    controller.send_reset()
    
    # Release resources
    camera_switcher.release()
    if out is not None:
        out.release()
    cv2.destroyAllWindows()


def handle_camera_switch_keys(key: int, camera_switcher: 'CameraSwitcher') -> bool:
    """
    Handle camera-switching hotkeys.

    Args:
        key: pressed key code (as returned by cv2.waitKey & 0xFF).
        camera_switcher: camera switcher to act on.

    Returns:
        True if the key was consumed by camera switching, False otherwise.
    """
    # Digit keys 1-9 switch to the n-th available camera
    if ord('1') <= key <= ord('9'):
        camera_id = key - ord('1')  # 0-8 index into the available-camera list
        available_cameras = camera_switcher.get_available_cameras()
        if camera_id < len(available_cameras):
            target_id = available_cameras[camera_id]['id']
            camera_switcher.switch_to_camera(target_id)
        else:
            print(f"摄像头 {camera_id + 1} 不存在")
        return True
    
    # Tab: next camera
    elif key == 9:  # Tab
        camera_switcher.switch_to_next_camera()
        return True
    
    # Shift+Tab: previous camera (key code may vary by system)
    elif key == 25:  # Shift+Tab
        camera_switcher.switch_to_previous_camera()
        return True
    
    # 'n': next camera
    elif key == ord('n'):
        camera_switcher.switch_to_next_camera()
        return True
    
    # 'p': previous camera
    elif key == ord('p'):
        camera_switcher.switch_to_previous_camera()
        return True
    
    # 'c': print the list of available cameras
    elif key == ord('c'):
        print("\n=== 可用摄像头列表 ===")
        available_cameras = camera_switcher.get_available_cameras()
        current_info = camera_switcher.get_current_camera_info()
        # BUGFIX: get_current_camera_info() may return None (no active camera,
        # as draw_hud already assumes); guard before indexing to avoid TypeError.
        current_id = current_info['id'] if current_info else None
        
        for i, camera in enumerate(available_cameras):
            status = " [当前]" if camera['id'] == current_id else ""
            print(f"  {i+1}. {camera['name']}{status}")
        
        print("按数字键 1-9 切换摄像头")
        print("按 'n' 下一个，'p' 上一个，'c' 显示列表")
        return True
    
    return False


class RosbridgeClient:
    """Minimal websocket publisher for a ROSBridge server."""

    def __init__(self, host: str, port: int):
        # Target endpoint; the socket itself is created lazily by connect().
        self.url = f"ws://{host}:{port}/"
        self.ws = None
        self.connected = False

    def connect(self):
        """Open the websocket connection; on failure stay disconnected."""
        try:
            import websocket
            self.ws = websocket.create_connection(self.url, timeout=5)
        except Exception as e:
            self.connected = False
            print(f"ROSBridge连接失败: {e}")
        else:
            self.connected = True
            print(f"已连接 ROSBridge: {self.url}")

    def publish(self, topic: str, msg_obj: dict) -> bool:
        """Publish msg_obj on topic; returns True on success.

        On a send failure the connection is closed and marked dead so a
        later reconnect attempt can be made.
        """
        if self.ws is None or not self.connected:
            return False
        try:
            self.ws.send(json.dumps({"op": "publish", "topic": topic, "msg": msg_obj}))
        except Exception as e:
            print(f"发布失败: {e}")
            # Tear down the broken connection; retry happens on a later connect()
            try:
                if self.ws:
                    self.ws.close()
            except Exception:
                pass
            self.connected = False
            return False
        return True

    def close(self):
        """Close the websocket (best effort) and mark as disconnected."""
        try:
            if self.ws:
                self.ws.close()
        except Exception:
            pass
        self.connected = False


class ArmController:
    """Maps detected hand/arm measurements to servo commands and publishes them
    over ROSBridge, applying smoothing, deadband, step limiting and safe-zone
    compression along the way.
    """

    def __init__(self, config: dict):
        """
        Args:
            config: controller configuration dict (see load_controller_config
                for the expected keys and the fallback defaults).
        """
        self.cfg = config
        rb = self.cfg.get("rosbridge", {})
        self.client = RosbridgeClient(rb.get("host", "172.17.20.234"), int(rb.get("port", 9090)))
        self.client.connect()
        self.last_send_ts = 0.0
        self.send_interval = float(self.cfg.get("send_interval_ms", 100)) / 1000.0
        # Most recent command info, kept for HUD/UI display
        self.last_positions_text = ""
        self.last_msg = None
        self.last_publish_ok = False
        # Log buffer and panel state
        self.logs = []
        self.max_logs = int(self.cfg.get("max_logs", 30))
        self.current_openness = 0.0
        self.current_angles = {"shoulder": None, "elbow": None, "wrist": None}
        # Control filtering and relative-control parameters
        filters = self.cfg.get("control_filters", {})
        self.deadband_pct = float(filters.get("deadband_pct", 2.0))
        self.max_step_pct = float(filters.get("max_step_pct", 5.0))
        self.smoothing_alpha = float(filters.get("smoothing_alpha", 0.3))
        self.rel_threshold_pct = float(filters.get("rel_threshold_pct", 5.0))
        self.rel_step_pct = float(filters.get("rel_step_pct", 5.0))
        self.spike_threshold_pct = float(filters.get("spike_threshold_pct", self.max_step_pct))
        # Safe zone and per-node limits
        safe = self.cfg.get("safe_zone", {})
        self.safe_center_pct = float(safe.get("center_pct", 50.0))
        self.safe_shrink_pct = float(safe.get("shrink_pct", 0.2))  # 0..1; larger values compress harder toward center
        self.limits_pct = safe.get("limits_pct", {
            "gripper": [0.0, 100.0],
            "shoulder": [0.0, 100.0],
            "elbow": [0.0, 100.0],
            "wrist": [0.0, 100.0],
        })
        # Per-node blocking switches (True = do not send commands for that node)
        self.block_nodes = {"gripper": False, "shoulder": False, "elbow": False, "wrist": False}
        # Relative gripper open/close state
        self.prev_meas_open_pct = 0.0
        self.command_grip_pct = self.safe_center_pct
        # Joint command state (percent)
        self.command_joint_pct = {"shoulder": None, "elbow": None, "wrist": None}

    def percent_to_degree(self, pct: float, servo_id: int) -> float:
        """Map a 0-100 percentage onto the configured degree range of servo_id,
        clamped to that range."""
        r = self.cfg["ranges"][str(servo_id)]
        deg = r["min"] + pct / 100.0 * (r["max"] - r["min"])
        return float(np.clip(deg, r["min"], r["max"]))

    def angle_to_percent(self, angle: Optional[float], detect_range: dict) -> Optional[float]:
        """Map a detected angle into 0-100 percent of detect_range
        ({"min": ..., "max": ...}); returns None when angle is None."""
        if angle is None:
            return None
        mn, mx = detect_range["min"], detect_range["max"]
        pct = (angle - mn) / (mx - mn) * 100.0
        return float(np.clip(pct, 0.0, 100.0))

    def send_reset(self):
        """Send the configured reset command raw over the websocket.

        NOTE(review): this bypasses RosbridgeClient.publish and sends the
        payload as-is — the configured reset_command is presumably a complete
        rosbridge message; confirm against the config.
        """
        reset = self.cfg.get("reset_command")
        if not reset:
            print("未配置复位指令")
            return
        try:
            payload = reset if isinstance(reset, dict) else json.loads(reset)
            if self.client.connected:
                self.client.ws.send(json.dumps(payload))
                print("复位指令已发送")
            else:
                print("ROSBridge未连接，无法发送复位")
        except Exception as e:
            print(f"复位发送失败: {e}")

    def reset_state(self):
        """Reset the internal control state back to the safe center."""
        self.prev_meas_open_pct = 0.0
        self.command_grip_pct = float(self.safe_center_pct)
        self.command_joint_pct = {"shoulder": None, "elbow": None, "wrist": None}

    def _apply_bounds_and_shrink(self, pct: float, name: str) -> float:
        """Clamp a percentage to the node's limits, then compress it
        proportionally toward the safe-zone center."""
        limits = self.limits_pct.get(name, [0.0, 100.0])
        pct = float(np.clip(pct, limits[0], limits[1]))
        c = float(self.safe_center_pct)
        k = float(self.safe_shrink_pct)
        k = max(0.0, min(1.0, k))
        return c + (pct - c) * (1.0 - k)

    def update_and_send(self, openness_pct: float, shoulder_deg: Optional[float], elbow_deg: Optional[float], wrist_deg: Optional[float]):
        """Filter the latest measurements into servo targets and publish them.

        Rate-limited by send_interval. Order of filtering matters:
        gripper uses a relative step machine, joints use EMA smoothing;
        both then go through step clamp -> deadband -> bounds/shrink.
        """
        now_ts = time.time()
        if now_ts - self.last_send_ts < self.send_interval:
            return
        self.last_send_ts = now_ts

        positions = []
        # --- Gripper: relative open/close state machine + debounce/step clamp ---
        grip_id = int(self.cfg["mapping"]["gripper_id"])
        meas_open = float(np.clip(openness_pct, 0.0, 100.0))
        delta_meas = meas_open - self.prev_meas_open_pct
        target_grip = self.command_grip_pct
        if abs(delta_meas) >= self.rel_threshold_pct:
            step = self.rel_step_pct * (1.0 if delta_meas > 0 else -1.0)
            target_grip = self.command_grip_pct + step
        # Step clamp + deadband
        delta_cmd = target_grip - self.command_grip_pct
        if abs(delta_cmd) > self.max_step_pct:
            target_grip = self.command_grip_pct + self.max_step_pct * (1.0 if delta_cmd > 0 else -1.0)
        if abs(target_grip - self.command_grip_pct) < self.deadband_pct:
            target_grip = self.command_grip_pct
        # Limits + compression toward the safe center
        target_grip = self._apply_bounds_and_shrink(target_grip, "gripper")
        # Update state
        self.command_grip_pct = float(target_grip)
        self.prev_meas_open_pct = float(meas_open)
        if not self.block_nodes.get("gripper", False):
            grip_deg = self.percent_to_degree(self.command_grip_pct, grip_id)
            positions.append({"id": grip_id, "position": round(grip_deg, 2)})

        # --- Joints: smoothing + step clamp + deadband + safe compression ---
        shoulder_pct = self.angle_to_percent(shoulder_deg, self.cfg["detect_ranges"]["shoulder"]) if shoulder_deg is not None else None
        elbow_pct = self.angle_to_percent(elbow_deg, self.cfg["detect_ranges"]["elbow"]) if elbow_deg is not None else None
        wrist_pct = self.angle_to_percent(wrist_deg, self.cfg["detect_ranges"]["wrist"]) if wrist_deg is not None else None

        def smooth_target(name: str, meas_pct: Optional[float]) -> Optional[float]:
            """EMA-smooth one joint toward meas_pct and persist the command;
            returns the previous command unchanged when no measurement."""
            prev_cmd = self.command_joint_pct.get(name)
            if meas_pct is None:
                return prev_cmd
            if prev_cmd is None:
                tgt = float(meas_pct)
            else:
                tgt = float(self.smoothing_alpha * meas_pct + (1.0 - self.smoothing_alpha) * prev_cmd)
                d = tgt - prev_cmd
                if abs(d) > self.max_step_pct:
                    tgt = prev_cmd + self.max_step_pct * (1.0 if d > 0 else -1.0)
                if abs(tgt - prev_cmd) < self.deadband_pct:
                    tgt = prev_cmd
            tgt = self._apply_bounds_and_shrink(tgt, name)
            self.command_joint_pct[name] = tgt
            return tgt

        sid = int(self.cfg["mapping"]["shoulder_joint_id"])  # joint 2 by default
        eid = int(self.cfg["mapping"]["elbow_joint_id"])     # joint 3 by default
        wid = int(self.cfg["mapping"]["wrist_joint_id"])     # joint 4 by default

        s_tgt = smooth_target("shoulder", shoulder_pct)
        e_tgt = smooth_target("elbow", elbow_pct)
        w_tgt = smooth_target("wrist", wrist_pct)

        if s_tgt is not None and not self.block_nodes.get("shoulder", False):
            positions.append({"id": sid, "position": round(self.percent_to_degree(s_tgt, sid), 2)})
        if e_tgt is not None and not self.block_nodes.get("elbow", False):
            positions.append({"id": eid, "position": round(self.percent_to_degree(e_tgt, eid), 2)})
        if w_tgt is not None and not self.block_nodes.get("wrist", False):
            positions.append({"id": wid, "position": round(self.percent_to_degree(w_tgt, wid), 2)})

        msg = {
            "position_unit": "deg",
            "position": positions,
            "duration": float(self.cfg.get("duration", 0.1))
        }
        topic = self.cfg["rosbridge"]["topic"]
        ok = self.client.publish(topic, msg)
        # Record a short summary of the last send for HUD/panel display
        try:
            self.last_positions_text = " ".join([f"{p['id']}:{p['position']}" for p in positions])
        except Exception:
            self.last_positions_text = str(positions)
        self.last_msg = msg
        self.last_publish_ok = bool(ok)
        # Update current state (shown in the right-hand panel)
        self.current_openness = float(openness_pct)
        self.current_angles = {"shoulder": shoulder_deg, "elbow": elbow_deg, "wrist": wrist_deg}
        # Append to the log, keeping only the most recent N entries
        try:
            log_item = {"ts": time.strftime("%H:%M:%S"), "ok": bool(ok), "summary": self.last_positions_text}
            self.logs.append(log_item)
            if len(self.logs) > self.max_logs:
                self.logs = self.logs[-self.max_logs:]
        except Exception:
            pass
        if ok:
            # Could reduce print frequency here to avoid console spam
            print(f"已发送控制: {positions}")
        else:
            print("控制发送失败")


def load_controller_config(path: str) -> dict:
    """Load the controller JSON config from path.

    Falls back to a minimal built-in default configuration when the file
    cannot be read or parsed.
    """
    # Minimal defaults used when the config file is unavailable
    fallback = {
        "rosbridge": {"host": "172.17.20.234", "port": 9090, "topic": "/servo_controller"},
        "duration": 0.1,
        "send_interval_ms": 100,
        "ranges": {"10": {"min": 0, "max": 123}, "4": {"min": -110, "max": 110}, "3": {"min": 70, "max": 120}, "2": {"min": -40, "max": 90}, "1": {"min": -120, "max": 120}},
        "mapping": {"gripper_id": 10, "shoulder_joint_id": 2, "elbow_joint_id": 3, "wrist_joint_id": 4},
        "detect_ranges": {"shoulder": {"min": 10, "max": 150}, "elbow": {"min": 0, "max": 150}, "wrist": {"min": 0, "max": 180}}
    }
    try:
        with open(path, "r", encoding="utf-8") as fh:
            return json.load(fh)
    except Exception as e:
        print(f"读取控制配置失败: {e}")
        return fallback


# --- UI state and HUD drawing ---
class UIState:
    """Mutable UI state shared with the OpenCV mouse callback."""

    def __init__(self, control_enabled: bool = True):
        # Whether control commands may currently be sent to the arm.
        self.control_enabled = control_enabled
        # Clickable toggle rectangle as (x1, y1, x2, y2); updated by draw_hud.
        self.toggle_rect = (0, 0, 0, 0)


def mouse_handler(event, x, y, flags, param):
    """OpenCV mouse callback: toggle control sending when the HUD switch is clicked.

    param is the shared UIState instance registered via cv2.setMouseCallback.
    """
    if event != cv2.EVENT_LBUTTONDOWN or not isinstance(param, UIState):
        return
    x1, y1, x2, y2 = param.toggle_rect
    hit = (x1 <= x <= x2) and (y1 <= y <= y2)
    if hit:
        param.control_enabled = not param.control_enabled


def draw_hud(frame: np.ndarray, camera_switcher: CameraSwitcher, ui_state: 'UIState', controller: Optional['ArmController']) -> np.ndarray:
    """Draw the HUD in place: camera info (top-left) and a clickable
    control-send toggle button (top-right). Returns the same frame."""
    height, width = frame.shape[:2]

    # Compact camera info in the top-left corner
    info = camera_switcher.get_current_camera_info()
    label = info['name'] if info else '未知摄像头'
    put_label_box(frame, f"摄像头: {label}", (10, 10), font_size=22, text_color=(255, 255, 255), box_color=(0, 0, 0))

    # Clickable control-toggle button in the top-right corner
    bx, by, bw, bh = width - 150, 10, 140, 32
    ui_state.toggle_rect = (bx, by, bx + bw, by + bh)
    border = (0, 150, 0) if ui_state.control_enabled else (0, 0, 150)
    cv2.rectangle(frame, (bx, by), (bx + bw, by + bh), border, thickness=2)
    caption = "控制发送: 开" if ui_state.control_enabled else "控制发送: 关"
    put_label_box(frame, caption, (bx + 6, by + 4), font_size=20, text_color=(255, 255, 255), box_color=(0, 0, 0))

    return frame


def draw_right_panel(frame: np.ndarray, controller: Optional['ArmController'], shoulder_deg: Optional[float], elbow_deg: Optional[float], wrist_deg: Optional[float], openness: float, panel_width: int = 360) -> np.ndarray:
    """Append an information panel on the right: angles/openness plus the
    control-command log. Returns a new, widened frame (original image on the
    left, panel on the right).
    """
    h, w = frame.shape[:2]
    canvas = np.zeros((h, w + panel_width, 3), dtype=np.uint8)
    canvas[:, :w] = frame

    px = w
    # Panel background
    cv2.rectangle(canvas, (px, 0), (px + panel_width - 1, h - 1), (40, 40, 40), thickness=-1)

    def _fmt(deg):
        # Uniform "123.4°" / "N/A" formatting for optional angles
        return f"{deg:.1f}°" if deg is not None else "N/A"

    y = 12
    # Angles and openness section
    canvas = put_label_box(canvas, "角度与开合", (px + 12, y), font_size=22, text_color=(255, 255, 255), box_color=(0, 0, 0))
    y += 36
    canvas = put_chinese_text(canvas, f"右肩角度: {_fmt(shoulder_deg)}", (px + 18, y), 22, (200, 255, 200))
    y += 28
    canvas = put_chinese_text(canvas, f"右肘角度: {_fmt(elbow_deg)}", (px + 18, y), 22, (200, 255, 200))
    y += 28
    canvas = put_chinese_text(canvas, f"右腕角度: {_fmt(wrist_deg)}", (px + 18, y), 22, (200, 255, 200))
    y += 28
    canvas = put_chinese_text(canvas, f"右手张开度: {openness:.1f}%", (px + 18, y), 22, (0, 255, 255))

    y += 40
    # Control-command log section
    canvas = put_label_box(canvas, "指令控制日志", (px + 12, y), font_size=22, text_color=(255, 255, 255), box_color=(0, 0, 0))
    y += 34
    entries = controller.logs[-12:] if controller is not None and hasattr(controller, "logs") else []
    if not entries:
        return put_chinese_text(canvas, "暂无日志", (px + 18, y), 20, (180, 180, 180))
    for entry in entries:
        ok_flag = "OK" if entry.get("ok") else "ERR"
        line = f"{entry.get('ts', '')} [{ok_flag}] {entry.get('summary', '')}"
        color = (0, 255, 0) if ok_flag == "OK" else (0, 0, 255)
        canvas = put_chinese_text(canvas, line, (px + 18, y), 18, color)
        y += 24
        if y > h - 24:
            break

    return canvas


def parse_args():
    """Build and parse the command-line arguments for this program."""
    p = argparse.ArgumentParser(description="右手检测和角度计算")
    p.add_argument("--model", type=str, default="models/yolo11n-hand-kpts-gpu/weights/best.pt", help="模型文件路径")
    p.add_argument("--pose_model", type=str, default="models/yolo11n-pose.pt", help="人体姿态模型路径")
    p.add_argument("--image", type=str, help="要处理的图像文件路径")
    p.add_argument("--video", type=int, default=0, help="摄像头索引（默认为0）")
    p.add_argument("--width", type=int, default=1280, help="摄像头采集宽度")
    p.add_argument("--height", type=int, default=720, help="摄像头采集高度")
    p.add_argument("--cam_backend", type=str, default="default", choices=["default", "dshow", "msmf"], help="摄像头后端")
    p.add_argument("--save", action="store_true", help="保存检测结果")
    p.add_argument("--device", type=str, default="auto", help="计算设备: 'cpu'、'cuda' 或 'auto'（自动选择）")
    p.add_argument("--controller_config", type=str, default="e:\\qwq\\xiaocar\\hand\\controller_config.json", help="控制配置文件路径")
    p.add_argument("--ros_disable", action="store_true", help="禁用ROSBridge控制（仅视觉）")
    p.add_argument("--gui", type=str, default="none", choices=["none", "ttk"], help="GUI模式：none使用OpenCV窗口，ttk使用ttkbootstrap窗口")
    return p.parse_args()


def auto_device(user_device: str) -> str:
    """
    Resolve the compute device.

    Returns user_device unchanged unless it is 'auto', in which case 'cuda'
    is picked when available and 'cpu' otherwise.
    """
    if user_device != "auto":
        return user_device
    cuda_ok = False
    try:
        cuda_ok = torch.cuda.is_available()
    except Exception:
        cuda_ok = False
    return "cuda" if cuda_ok else "cpu"


def main():
    """Entry point: parse CLI args, load the models, optionally create the
    arm controller, then dispatch to image or video processing."""
    args = parse_args()

    # Pick the compute device
    device = auto_device(args.device)
    print(f"使用计算设备: {device}")

    # Load the hand-keypoint model
    print(f"正在加载模型: {args.model}")
    try:
        hand_model = YOLO(args.model)
        hand_model.to(device)
        print("模型加载成功")
    except Exception as e:
        print(f"模型加载失败: {e}")
        return

    # Load the human-pose model (used for right-arm angle computation)
    print(f"正在加载人体姿态模型: {args.pose_model}")
    try:
        pose_model = YOLO(args.pose_model)
        pose_model.to(device)
        print("人体姿态模型加载成功")
    except Exception as e:
        print(f"人体姿态模型加载失败: {e}")
        return

    banner = "=" * 50
    print(banner)
    print("右手检测和角度计算程序")
    print("本程序只识别和处理右手")
    print(banner)

    # Optional ROSBridge controller (best effort: failure disables control only)
    controller = None
    if args.ros_disable:
        print("ROSBridge控制已禁用，仅进行视觉检测")
    else:
        try:
            controller = ArmController(load_controller_config(args.controller_config))
        except Exception as e:
            print(f"控制器初始化失败: {e}")

    # Dispatch: single image vs. camera stream
    if args.image:
        print(f"处理图像: {args.image}")
        process_image(hand_model, pose_model, args.image, args.save)
    else:
        print("启动摄像头检测...")
        process_video(hand_model, pose_model, args.video, args.save, controller, preferred_width=args.width, preferred_height=args.height, cam_backend=args.cam_backend, use_ttk=(args.gui == "ttk"))


# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()