"""
儿童学习注意力检测模块

专为台灯硬件载体场景设计的智能注意力检测系统，基于YOLOv11姿态估计和物品检测模型。
针对儿童写作业场景进行深度优化，提供全方位的学习行为分析和智能管理。

核心功能：
- 上半身姿态关键点检测（头部、肩膀、手臂、手腕）
- 多维度注意力特征分析（头部姿态、手部活动、身体稳定性）
- 身体特征标签检测（坐姿标准性、头部扭动歪曲、身体对称性）
- 智能物品检测与管理（允许/禁止物品识别、使用时长监控）
- 行为智能识别（短暂喝水、伸懒腰、调整坐姿等合理行为）
- 学习状态分类（专注学习、分心、疲劳/睡觉、离开、物品分心）
- 干扰行为检测（东张西望、手机互动、长时间静止、禁止物品）
- 时序行为分析（基于历史帧的行为模式识别）
- 配置化管理系统（支持个性化物品和行为规则定制）

使用方法：

1. 基本使用示例：
```python
import cv2
from kidsbuddy.vision.attention_detector import AttentionDetector

# 初始化检测器
detector = AttentionDetector(
    model_path="yolo11n-pose.pt",           # YOLO模型路径
    conf_threshold=0.3,                     # 关键点置信度阈值
    history_window=30,                      # 历史帧窗口大小
    movement_threshold=5.0,                 # 运动检测阈值
    stability_threshold=90,                 # 静止状态判断阈值（帧数）
    behavior_config_path="behavior_config.json",  # 行为配置文件路径
    enable_object_detection=True            # 启用物品检测
)

# 处理单帧图像
frame = cv2.imread("student_image.jpg")
status, reasons, keypoints_data, features = detector.process_frame(frame)

print(f"注意力状态: {status}")
print(f"判断原因: {reasons}")
print(f"专注度评分: {features.get('focus_score', 'N/A')}")

# 获取身体特征标签
posture_labels = detector.get_posture_labels(features)
print(f"坐姿标准性: {posture_labels.get('posture_label', 'N/A')}")
print(f"头部姿态: {posture_labels.get('head_label', 'N/A')}")
print(f"头部倾斜状态: {posture_labels.get('head_tilt_status', 'N/A')}")
print(f"头部扭转状态: {posture_labels.get('head_twist_status', 'N/A')}")

# 检查物品检测结果
object_analysis = keypoints_data.get('object_analysis', {})
if object_analysis.get('warnings'):
    print(f"物品警告: {object_analysis['warnings']}")
if object_analysis.get('suggestions'):
    print(f"使用建议: {object_analysis['suggestions']}")
```

2. 实时摄像头检测：
```python
import cv2
from kidsbuddy.vision.attention_detector import AttentionDetector

detector = AttentionDetector()
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if not ret:
        break

    # 检测注意力状态
    status, reasons, keypoints_data, features = detector.process_frame(frame)

    # 在图像上显示结果
    cv2.putText(frame, f"Status: {status}", (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

    # 显示关键点（如果检测到）
    if keypoints_data['kpts'] is not None:
        kpts = keypoints_data['kpts']
        confs = keypoints_data['confs']
        for i, (kpt, conf) in enumerate(zip(kpts, confs)):
            if conf > 0.5:  # 只显示高置信度的关键点
                cv2.circle(frame, (int(kpt[0]), int(kpt[1])), 3, (0, 0, 255), -1)

    cv2.imshow('Attention Detection', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
```

3. 批量处理和统计：
```python
detector = AttentionDetector()
attention_states = []

# 处理多帧
for frame in video_frames:
    status, reasons, _, features = detector.process_frame(frame)
    attention_states.append(status)

# 获取注意力摘要
summary = detector.get_attention_summary(window_size=100)
print(f"专注度百分比: {summary['focus_percentage']:.1f}%")
print(f"状态分布: {summary['state_distribution']}")
```

4. 高级配置：
```python
# 针对不同年龄段的儿童调整参数
detector_young = AttentionDetector(
    conf_threshold=0.25,      # 降低阈值，适应儿童较小的关键点
    movement_threshold=3.0,   # 降低运动阈值，儿童动作较小
    stability_threshold=60    # 缩短静止判断时间
)

# 重置历史数据（开始新的检测会话）
detector.reset_history()
```

5. 身体特征标签检测：
```python
detector = AttentionDetector()
frame = cv2.imread("student_image.jpg")
status, reasons, keypoints_data, features = detector.process_frame(frame)

# 获取身体特征标签
posture_labels = detector.get_posture_labels(features)

print(f"坐姿标准性: {posture_labels.get('posture_label', 'N/A')}")
print(f"头部姿态: {posture_labels.get('head_label', 'N/A')}")
print(f"头部倾斜状态: {posture_labels.get('head_tilt_status', 'N/A')}")
print(f"头部扭转状态: {posture_labels.get('head_twist_status', 'N/A')}")
print(f"身体对称性: {posture_labels.get('symmetry_label', 'N/A')}")

# 获取详细数值特征
print(f"坐姿评分: {features.get('posture_score', 'N/A'):.2f}")
print(f"头部倾斜角度: {features.get('head_tilt', 'N/A'):.1f}°")
print(f"头部扭转角度: {features.get('head_twist', 'N/A'):.1f}°")
print(f"身体对称性评分: {features.get('body_symmetry', 'N/A'):.2f}")
```

6. 配置管理和物品检测：
```python
detector = AttentionDetector(
    behavior_config_path="behavior_config.json",
    enable_object_detection=True
)

# 动态添加允许的物品
detector.behavior_config.add_allowed_object(
    "calculator", "计算器", max_duration=120, description="数学计算工具"
)

# 动态添加禁止的物品
detector.behavior_config.add_forbidden_object(
    "comic", "漫画书", severity="medium", description="娱乐读物分心"
)

# 检查物品是否允许
is_allowed, obj_info = detector.behavior_config.is_object_allowed("water_bottle")
print(f"水杯: {'允许' if is_allowed else '禁止'} - {obj_info['description']}")

# 保存配置
detector.behavior_config.save_config("updated_config.json")

# 处理图像并获取完整分析
status, reasons, keypoints_data, features = detector.process_frame(frame)

# 获取物品检测结果
object_analysis = keypoints_data.get('object_analysis', {})
detected_objects = keypoints_data.get('detected_objects', [])

print(f"检测到的物品: {[obj['class_name'] for obj in detected_objects]}")
if object_analysis.get('warnings'):
    print(f"物品警告: {object_analysis['warnings']}")
if object_analysis.get('suggestions'):
    print(f"使用建议: {object_analysis['suggestions']}")
```

返回值说明：
- status (str): 注意力状态 ["专注学习", "基本专注", "轻微分心", "明显分心",
                            "严重分心", "睡觉/疲劳", "注意力不集中", "物品分心",
                            "无法判断", "未检测到人"]
- reasons (List[str]): 详细判断原因列表（包含物品检测和行为分析结果）
- keypoints_data (Dict): 关键点数据，包含：
  * 'kpts', 'confs', 'bbox': 姿态检测数据
  * 'detected_objects': 检测到的物品列表
  * 'object_analysis': 物品分析结果（警告、建议）
  * 'allowed_behaviors': 检测到的允许行为列表
- features (Dict): 学习行为特征，包含各种姿态和活动指标：
  * 原有特征：头部姿态、手部活动、身体姿态、注意力评分
  * 新增身体特征：
    - 坐姿标准性：'posture_score', 'posture_grade' (坐姿评分和等级)
    - 头部扭动检测：'head_tilt', 'head_twist' (头部倾斜和扭转角度)
    - 身体对称性：'body_symmetry', 'symmetry_grade' (对称性评分和等级)
    - 脊柱直立度：'spine_straightness' (脊柱直立程度)
    - 特征等级标签：'posture_grade', 'head_grade', 'symmetry_grade' (0-4等级评定)

注意事项：
1. 确保摄像头能够清晰捕捉到儿童的上半身和学习环境
2. 光照条件要适中，避免过暗或过亮，以确保物品检测准确性
3. 建议在检测前调用 reset_history() 清除历史数据
4. 连续检测时，历史数据会自动累积，提高判断准确性
5. 模型文件需要预先下载或训练好的 YOLOv11 姿态估计模型和物品检测模型
6. 物品检测功能依赖于YOLO模型的检测能力，可能存在误检或漏检
7. 行为配置文件支持热加载，可在运行时动态调整规则
8. 建议根据不同年龄段和学习场景调整配置参数
9. 物品使用时长和行为频率会自动监控，超时会给出提醒
10. 配置文件格式为JSON，支持中文物品名称和描述
"""

import copy
import json
import math
import os
import time
from collections import deque
from typing import Any, Dict, List, Optional, Tuple

import numpy as np
from ultralytics import YOLO

# COCO keypoint names, in the order emitted by YOLO pose models.
COCO_KP_NAMES = [
    'nose', 'left_eye', 'right_eye', 'left_ear', 'right_ear',
    'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow',
    'left_wrist', 'right_wrist', 'left_hip', 'right_hip',
    'left_knee', 'right_knee', 'left_ankle', 'right_ankle'
]

# Map each keypoint name to its index in COCO_KP_NAMES.
COCO_KP_IDX = dict(zip(COCO_KP_NAMES, range(len(COCO_KP_NAMES))))

# Limb connections (by name) used to draw the skeleton overlay.
_SKELETON_NAME_PAIRS = [
    # Face
    ('nose', 'left_eye'),
    ('nose', 'right_eye'),
    ('left_eye', 'left_ear'),
    ('right_eye', 'right_ear'),
    # Torso
    ('left_shoulder', 'right_shoulder'),
    ('left_shoulder', 'left_hip'),
    ('right_shoulder', 'right_hip'),
    ('left_hip', 'right_hip'),
    # Arms
    ('left_shoulder', 'left_elbow'),
    ('left_elbow', 'left_wrist'),
    ('right_shoulder', 'right_elbow'),
    ('right_elbow', 'right_wrist'),
    # Legs
    ('left_hip', 'left_knee'),
    ('left_knee', 'left_ankle'),
    ('right_hip', 'right_knee'),
    ('right_knee', 'right_ankle'),
]

# Same pairs resolved to COCO_KP_IDX indices, as consumed by drawing code.
SKELETON_PAIRS = [
    (COCO_KP_IDX[a], COCO_KP_IDX[b]) for a, b in _SKELETON_NAME_PAIRS
]


class BehaviorConfig:
    """
    Study-behavior configuration.

    Declares which objects are allowed, which objects are forbidden, and
    which postures/behaviors are acceptable during a study session.
    Supports loading overrides from a JSON file and runtime modification.
    """

    def __init__(self, config_path: Optional[str] = None):
        """
        Initialize the behavior configuration.

        Args:
            config_path: path to a JSON config file; when None (or when the
                file does not exist) only the built-in defaults are used.
        """
        # Built-in default rules. Kept as a pristine reference; the active
        # configuration below is a DEEP copy, so runtime edits never leak
        # back into these defaults.
        self.default_config = {
            # Allowed objects (do not trigger distraction warnings)
            "allowed_objects": {
                "bottle": {"name": "水杯", "max_duration": 30, "description": "短暂喝水"},
                "cup": {"name": "杯子", "max_duration": 30, "description": "短暂喝水"},
                "book": {"name": "书本", "max_duration": 3600, "description": "学习用书"},
                "pen": {"name": "笔", "max_duration": 3600, "description": "书写工具"},
                "pencil": {"name": "铅笔", "max_duration": 3600, "description": "书写工具"},
                "eraser": {"name": "橡皮", "max_duration": 60, "description": "擦除错误"},
                "ruler": {"name": "尺子", "max_duration": 120, "description": "测量工具"},
                "calculator": {"name": "计算器", "max_duration": 300, "description": "计算工具"},
                "notebook": {"name": "笔记本", "max_duration": 3600, "description": "记录笔记"},
                "paper": {"name": "纸张", "max_duration": 3600, "description": "书写纸张"},
                "desk": {"name": "桌子", "max_duration": 3600, "description": "学习桌"},
                "chair": {"name": "椅子", "max_duration": 3600, "description": "学习椅"},
                "lamp": {"name": "台灯", "max_duration": 3600, "description": "照明设备"}
            },

            # Forbidden objects (trigger distraction warnings)
            "forbidden_objects": {
                "cell phone": {"name": "手机", "severity": "high", "description": "电子设备分心"},
                "mobile phone": {"name": "手机", "severity": "high", "description": "电子设备分心"},
                "phone": {"name": "手机", "severity": "high", "description": "电子设备分心"},
                "laptop": {"name": "笔记本电脑", "severity": "high", "description": "电子设备分心"},
                "computer": {"name": "电脑", "severity": "high", "description": "电子设备分心"},
                "tablet": {"name": "平板", "severity": "high", "description": "电子设备分心"},
                "keyboard": {"name": "键盘", "severity": "high", "description": "写作业时不应使用键盘"},
                "mouse": {"name": "鼠标", "severity": "high", "description": "写作业时不应使用鼠标"},
                "toy": {"name": "玩具", "severity": "medium", "description": "玩具分心"},
                "game": {"name": "游戏", "severity": "high", "description": "游戏分心"},
                "headphones": {"name": "耳机", "severity": "medium", "description": "音频设备分心"},
                "earphones": {"name": "耳机", "severity": "medium", "description": "音频设备分心"}
            },

            # Allowed posture behaviors
            "allowed_behaviors": {
                "stretch": {
                    "name": "伸懒腰",
                    "max_duration": 20,  # max duration in seconds
                    "max_frequency": 3,  # max occurrences per hour
                    "description": "短暂伸展放松",
                    "detection_criteria": {
                        "arm_raise_angle": 120,  # arm-raise angle (degrees)
                        "duration_threshold": 5   # duration threshold (seconds)
                    }
                },
                "drink_water": {
                    "name": "喝水",
                    "max_duration": 30,
                    "max_frequency": 5,
                    "description": "补充水分",
                    "detection_criteria": {
                        "hand_to_mouth": True,
                        "head_tilt_back": 15
                    }
                },
                "adjust_posture": {
                    "name": "调整坐姿",
                    "max_duration": 10,
                    "max_frequency": 10,
                    "description": "调整身体姿势",
                    "detection_criteria": {
                        "body_movement": True,
                        "return_to_study": True
                    }
                }
            },

            # Forbidden posture behaviors
            "forbidden_behaviors": {
                "lying_down": {
                    "name": "趴桌子",
                    "severity": "high",
                    "description": "不良学习姿势",
                    "detection_criteria": {
                        "head_too_low": -20,
                        "duration_threshold": 30
                    }
                },
                "excessive_movement": {
                    "name": "过度活动",
                    "severity": "medium",
                    "description": "注意力不集中",
                    "detection_criteria": {
                        "movement_frequency": 0.8,
                        "duration_threshold": 60
                    }
                },
                "looking_around": {
                    "name": "东张西望",
                    "severity": "medium",
                    "description": "视线分散",
                    "detection_criteria": {
                        "head_yaw_threshold": 30,
                        "frequency_threshold": 5
                    }
                }
            },

            # Time thresholds
            "time_thresholds": {
                "short_break_max": 30,      # max short-break length (seconds)
                "medium_break_max": 120,    # max medium-break length (seconds)
                "distraction_threshold": 180,  # distraction-warning threshold (seconds)
                "evaluation_window": 300    # behavior-evaluation window (seconds)
            },

            # Severity levels
            "severity_levels": {
                "low": {"score": 0.1, "color": (0, 255, 255), "action": "提醒"},
                "medium": {"score": 0.3, "color": (0, 165, 255), "action": "警告"},
                "high": {"score": 0.6, "color": (0, 0, 255), "action": "严重警告"}
            }
        }

        # Active configuration. NOTE: must be a *deep* copy — a shallow
        # .copy() would share the nested dicts with default_config, so
        # add_allowed_object()/load_config() would silently mutate the
        # defaults as well.
        self.config = copy.deepcopy(self.default_config)
        if config_path and os.path.exists(config_path):
            self.load_config(config_path)

    def load_config(self, config_path: str) -> bool:
        """
        Load configuration overrides from a JSON file.

        Args:
            config_path: path of the config file to read.

        Returns:
            bool: True on success, False if reading/parsing failed.
        """
        try:
            with open(config_path, 'r', encoding='utf-8') as f:
                user_config = json.load(f)

            # Merge user overrides on top of the active configuration.
            self._merge_config(self.config, user_config)
            return True
        except Exception as e:
            print(f"配置文件加载失败: {e}")
            return False

    def save_config(self, config_path: str) -> bool:
        """
        Persist the active configuration to a JSON file.

        Args:
            config_path: destination file path.

        Returns:
            bool: True on success, False if writing failed.
        """
        try:
            with open(config_path, 'w', encoding='utf-8') as f:
                json.dump(self.config, f, ensure_ascii=False, indent=2)
            return True
        except Exception as e:
            print(f"配置文件保存失败: {e}")
            return False

    def _merge_config(self, base_config: dict, user_config: dict) -> None:
        """Recursively merge user_config into base_config (in place)."""
        for key, value in user_config.items():
            if key in base_config and isinstance(base_config[key], dict) and isinstance(value, dict):
                self._merge_config(base_config[key], value)
            else:
                base_config[key] = value

    def add_allowed_object(self, object_id: str, name: str, max_duration: int = 60, description: str = "") -> None:
        """Register object_id as an allowed object in the active config."""
        self.config["allowed_objects"][object_id] = {
            "name": name,
            "max_duration": max_duration,
            "description": description
        }

    def add_forbidden_object(self, object_id: str, name: str, severity: str = "medium", description: str = "") -> None:
        """Register object_id as a forbidden object in the active config."""
        self.config["forbidden_objects"][object_id] = {
            "name": name,
            "severity": severity,
            "description": description
        }

    def is_object_allowed(self, object_name: str) -> Tuple[bool, dict]:
        """
        Check whether a detected object is allowed.

        Matching is bidirectional substring containment on lowercased ids;
        allowed entries are checked before forbidden ones, and unknown
        objects default to forbidden with medium severity.

        Returns:
            Tuple[bool, dict]: (allowed?, matching rule info).
        """
        object_name_lower = object_name.lower()

        # Allowed list first.
        for obj_id, obj_info in self.config["allowed_objects"].items():
            if obj_id.lower() in object_name_lower or object_name_lower in obj_id.lower():
                return True, obj_info

        # Forbidden list next.
        for obj_id, obj_info in self.config["forbidden_objects"].items():
            if obj_id.lower() in object_name_lower or object_name_lower in obj_id.lower():
                return False, obj_info

        # Unknown object: treat as a medium-severity warning by default.
        return False, {"name": object_name, "severity": "medium", "description": "未知物品"}

    def get_behavior_config(self, behavior_type: str) -> Optional[dict]:
        """Return the rule for behavior_type tagged with its list of origin, or None."""
        allowed = self.config["allowed_behaviors"].get(behavior_type)
        if allowed:
            return {"type": "allowed", **allowed}

        forbidden = self.config["forbidden_behaviors"].get(behavior_type)
        if forbidden:
            return {"type": "forbidden", **forbidden}

        return None


class AttentionDetector:
    """
    儿童学习注意力检测器

    专为台灯场景下的儿童写作业行为分析设计。通过分析上半身姿态、
    头部动作、手部活动等多维度特征，准确判断学习注意力状态。
    """

    def __init__(
        self,
        model_path: str = "yolo11n-pose.pt",
        conf_threshold: float = 0.3,
        history_window: int = 30,  # history window in frames (~1 s @ 30 fps)
        movement_threshold: float = 5.0,  # movement-detection threshold (pixels)
        stability_threshold: int = 90,  # frames of stillness before "static" (~3 s)
        behavior_config_path: Optional[str] = None,  # behavior config file path
        enable_object_detection: bool = True  # enable object detection
    ) -> None:
        """
        Initialize the child study-attention detector.

        Args:
            model_path: path to the YOLOv11 pose-estimation model
            conf_threshold: per-keypoint confidence threshold
            history_window: number of frames kept for temporal analysis
            movement_threshold: pixel distance treated as movement
            stability_threshold: frame count used to declare a static state
            behavior_config_path: optional behavior-rules JSON file
            enable_object_detection: whether to also load an object model

        Raises:
            Exception: re-raised when the YOLO pose model cannot be loaded.
        """
        try:
            self.model = YOLO(model_path)
        except Exception as e:
            print(f"错误：无法加载YOLO模型 {model_path}。请确保模型文件存在且Ultralytics已正确安装。")
            print(f"错误详情: {e}")
            raise

        # Behavior rules (allowed/forbidden objects and behaviors).
        self.behavior_config = BehaviorConfig(behavior_config_path)
        self.enable_object_detection = enable_object_detection

        # Optionally load a general-purpose YOLO model for object detection.
        # By convention it shares the pose model's path minus the "-pose"
        # suffix; str.replace is already a no-op when the suffix is absent,
        # so no conditional is needed.
        self.object_model = None
        if enable_object_detection:
            try:
                object_model_path = model_path.replace('-pose', '')
                self.object_model = YOLO(object_model_path)
            except Exception as e:
                print(f"警告：无法加载物品检测模型，将禁用物品检测功能: {e}")
                self.enable_object_detection = False

        self.conf_threshold = conf_threshold
        self.kp_names = COCO_KP_NAMES
        self.kp_idx = COCO_KP_IDX
        self.skeleton_pairs = SKELETON_PAIRS

        # Temporal-analysis parameters.
        self.history_window = history_window
        self.movement_threshold = movement_threshold
        self.stability_threshold = stability_threshold

        # Rolling per-frame history buffers.
        self.head_positions = deque(maxlen=history_window)
        self.hand_positions = deque(maxlen=history_window)
        self.shoulder_positions = deque(maxlen=history_window)
        self.attention_history = deque(maxlen=history_window)

        # State tracking.
        self.last_movement_time = time.time()
        self.static_frame_count = 0
        self.distraction_count = 0
        self._consecutive_yaw_count = 0  # consecutive head-yaw frame count

        # Behavior tracking.
        self.behavior_history = deque(maxlen=history_window * 10)  # longer behavior history
        self.object_detection_history = deque(maxlen=history_window)
        self.allowed_behavior_start_time = {}  # start time of each allowed behavior
        self.forbidden_object_warnings = {}  # warnings issued for forbidden objects

        # Object-interaction tracking.
        self.object_interaction_history = {}  # person/object interaction history
        self.object_position_history = {}     # object position history
        self.object_interaction_threshold = 150  # interaction distance threshold (pixels)
        self.object_usage_tracking = {}       # actual object-usage tracking

        # Per-behavior detection state, smoothed via short histories for
        # frame-to-frame consistency.
        self.behavior_state = {
            'stretch': False,      # stretching
            'drink_water': False,  # drinking water
            'reading': False,      # reading
            'writing': False       # writing
        }
        self.behavior_state_history = {k: deque(maxlen=5) for k in self.behavior_state}
        self.behavior_duration = {k: 0 for k in self.behavior_state}
        self.behavior_confidence = {k: 0.0 for k in self.behavior_state}  # per-behavior confidence

        # Study-scene context tracking.
        self.study_context = {
            'desk_region': None,       # study-desk region
            'active_study_zone': None, # active study area
            'hand_activity_zone': None # hand-activity area
        }

        # Temporal behavior-validation buffers (motion sequences).
        self.temporal_behavior_validator = {
            'stretch_sequence': deque(maxlen=10),    # stretch motion sequence
            'drink_sequence': deque(maxlen=8),       # drinking motion sequence
            'study_sequence': deque(maxlen=15)       # study motion sequence
        }

    def _extract_keypoints(
        self, frame: np.ndarray
    ) -> Tuple[Optional[np.ndarray], Optional[np.ndarray], Optional[np.ndarray], int]:
        """
        Extract pose keypoints from a single frame, supporting multi-person scenes.

        Runs the pose model on the frame; when several people are detected,
        the one with the largest bounding-box area is chosen as the main
        subject (typically the student closest to the camera/lamp).

        Args:
            frame: input image frame (NumPy array).

        Returns:
            Tuple[Optional[np.ndarray], Optional[np.ndarray], Optional[np.ndarray], int]:
                - scaled_kpts: [K, 2] keypoint pixel coordinates (x, y) of the
                  main person, or None
                - confidences: [K] per-keypoint confidences, or None
                - bbox: [4] bounding box [x1, y1, x2, y2] if available, or None
                - person_count: number of people detected (0 on failure)
        """
        try:
            detections_list = self.model(
                frame, verbose=False)  # verbose=False suppresses per-frame logging

            if not detections_list:  # model() returns a list of Results objects
                return None, None, None, 0

            result = detections_list[0]  # Process the first Results object

            # Check if keypoints exist and have valid shape
            if (hasattr(result, 'keypoints') and
                result.keypoints is not None and
                hasattr(result.keypoints, 'xy') and
                    result.keypoints.xy is not None):

                # Get keypoints tensor shape
                kpts_shape = result.keypoints.xy.shape
                person_count = kpts_shape[0] if len(kpts_shape) >= 2 else 0

                # Check if we have at least one person detected
                if person_count > 0:
                    # Pick the largest person as the main subject (usually the
                    # student nearest the camera)
                    main_person_idx = 0
                    if person_count > 1 and hasattr(result, 'boxes') and result.boxes is not None:
                        # Select by maximum bounding-box area
                        boxes = result.boxes.xyxy.cpu().numpy()
                        max_area = 0
                        for i, box in enumerate(boxes):
                            if len(box) >= 4:
                                area = (box[2] - box[0]) * (box[3] - box[1])
                                if area > max_area:
                                    max_area = area
                                    main_person_idx = i

                    # Process the main detected person
                    # Shape (K, 2)
                    person_kpts = result.keypoints.xy[main_person_idx].cpu().numpy()

                    # Check if confidence data is available
                    if (hasattr(result.keypoints, 'conf') and
                            result.keypoints.conf is not None):
                        # Shape (K,)
                        person_confidences = result.keypoints.conf[main_person_idx].cpu().numpy()
                    else:
                        # If no confidence data, create default confidence array
                        person_confidences = np.ones(person_kpts.shape[0]) * 0.5

                    # The keypoints are already in pixel coordinates
                    scaled_kpts = person_kpts.copy()

                    person_bbox = None
                    if (hasattr(result, 'boxes') and
                        result.boxes is not None and
                        hasattr(result.boxes, 'xyxy') and
                            result.boxes.xyxy.shape[0] > main_person_idx):
                        # Take [x1, y1, x2, y2]
                        person_bbox = result.boxes.xyxy[main_person_idx].cpu().numpy()

                    return scaled_kpts, person_confidences, person_bbox, person_count

            return None, None, None, 0

        except Exception as e:
            # Best-effort: on any detection error, log and return an empty result
            print(f"关键点提取过程中出现错误: {e}")
            return None, None, None, 0

    @staticmethod
    def _calculate_angle(
        p1_arr: Optional[np.ndarray],
        p2_arr: Optional[np.ndarray],
        p3_arr: Optional[np.ndarray]
    ) -> Optional[float]:
        """
        计算由三点p1, p2, p3构成的角度 (p2是顶点)

        参数:
            p1_arr: 第一个点的坐标
            p2_arr: 顶点坐标
            p3_arr: 第三个点的坐标

        返回:
            Optional[float]: 角度值（度），如果无法计算则返回None
        """
        if p1_arr is None or p2_arr is None or p3_arr is None:
            return None

        v1 = p1_arr - p2_arr
        v2 = p3_arr - p2_arr

        dot_product = np.dot(v1, v2)
        norm_v1 = np.linalg.norm(v1)
        norm_v2 = np.linalg.norm(v2)

        if norm_v1 == 0 or norm_v2 == 0:
            return 0.0

        cos_angle = np.clip(dot_product / (norm_v1 * norm_v2), -1.0, 1.0)
        angle_rad = np.arccos(cos_angle)
        angle_deg = np.degrees(angle_rad)
        return float(angle_deg)

    @staticmethod
    def _get_midpoint(
        p1_arr: Optional[np.ndarray],
        p2_arr: Optional[np.ndarray]
    ) -> Optional[np.ndarray]:
        """
        计算两点的中点

        参数:
            p1_arr: 第一个点的坐标
            p2_arr: 第二个点的坐标

        返回:
            Optional[np.ndarray]: 中点坐标，如果无法计算则返回None
        """
        if p1_arr is None or p2_arr is None:
            return None
        return (p1_arr + p2_arr) / 2

    @staticmethod
    def _get_distance(
        p1_arr: Optional[np.ndarray],
        p2_arr: Optional[np.ndarray]
    ) -> Optional[float]:
        """
        计算两点之间的欧几里得距离

        参数:
            p1_arr: 第一个点的坐标
            p2_arr: 第二个点的坐标

        返回:
            Optional[float]: 距离值，如果无法计算则返回None
        """
        if p1_arr is None or p2_arr is None:
            return None
        return float(np.linalg.norm(p1_arr - p2_arr))

    def _calculate_study_features(
        self,
        kpts: np.ndarray,
        confs: np.ndarray
    ) -> Dict[str, Optional[float]]:
        """
        计算学习场景专用特征

        针对台灯场景下的儿童写作业行为，计算多维度注意力特征：
        1. 头部姿态特征（点头、偏转、稳定性、扭动、歪曲）
        2. 手部活动特征（写字动作、位置稳定性）
        3. 身体姿态特征（坐姿、前倾程度、对称性）
        4. 注意力集中度特征（视线方向、头部稳定性）
        5. 身体特征标签（坐姿标准性、头部姿态评级）

        参数:
            kpts: 关键点坐标数组
            confs: 关键点置信度数组

        返回:
            Dict[str, Optional[float]]: 包含学习行为特征的字典
        """
        # 安全检查：确保输入数据有效
        if kpts is None or confs is None or len(kpts) == 0 or len(confs) == 0:
            return {
                'head_pitch': None, 'head_yaw': None, 'head_stability': None,
                'head_tilt': None, 'head_twist': None, 'head_posture_score': None,
                'hand_activity': None, 'writing_posture': None, 'hand_stability': None,
                'sitting_posture': None, 'forward_lean': None, 'shoulder_level': None,
                'spine_straightness': None, 'body_symmetry': None, 'posture_score': None,
                'focus_score': None, 'distraction_level': None,
                'posture_grade': None, 'head_grade': None, 'symmetry_grade': None,
                # 新增特征
                'hand_to_face_distance': None,
                'eye_to_hand_angle': None, 
                'reading_posture_score': None,
                'gaze_direction': None
            }

        features: Dict[str, Optional[float]] = {
            # 头部姿态特征
            'head_pitch': None,      # 头部俯仰角（点头程度）
            'head_yaw': None,        # 头部偏转角（左右转头）
            'head_stability': None,   # 头部稳定性（减少晃动）
            'head_tilt': None,       # 头部倾斜角度（歪头程度）
            'head_twist': None,      # 头部扭转程度
            'head_posture_score': None,  # 头部姿态综合评分

            # 手部活动特征
            'hand_activity': None,    # 手部活动强度
            'writing_posture': None,  # 写字姿势评分
            'hand_stability': None,   # 手部位置稳定性

            # 身体姿态特征
            'sitting_posture': None,  # 坐姿评分
            'forward_lean': None,     # 前倾程度
            'shoulder_level': None,   # 肩膀水平度
            'spine_straightness': None,  # 脊柱直立度
            'body_symmetry': None,    # 身体对称性
            'posture_score': None,    # 坐姿综合评分

            # 注意力集中度
            'focus_score': None,      # 综合专注度评分
            'distraction_level': None,  # 分心程度

            # 身体特征标签（等级评定）
            'posture_grade': None,    # 坐姿等级（0-4: 较差/一般/良好/优秀）
            'head_grade': None,       # 头部姿态等级
            'symmetry_grade': None,   # 身体对称性等级
            
            # 新增特征：用于更精确区分喝水、阅读等行为
            'hand_to_face_distance': None,  # 手到脸部的距离
            'eye_to_hand_angle': None,      # 眼睛到手的视线角度
            'reading_posture_score': None,  # 阅读姿势评分
            'gaze_direction': None          # 视线方向
        }

        def get_kp(name: str) -> Optional[np.ndarray]:
            """获取置信度足够的关键点"""
            idx = self.kp_idx[name]
            # 检查索引是否在有效范围内
            if idx < len(confs) and idx < len(kpts) and confs[idx] >= self.conf_threshold:
                return kpts[idx]
            return None

        # 获取关键点
        nose = get_kp('nose')
        l_eye, r_eye = get_kp('left_eye'), get_kp('right_eye')
        l_shoulder, r_shoulder = get_kp(
            'left_shoulder'), get_kp('right_shoulder')
        l_elbow, r_elbow = get_kp('left_elbow'), get_kp('right_elbow')
        l_wrist, r_wrist = get_kp('left_wrist'), get_kp('right_wrist')

        # 计算头部姿态特征（包含扭动和歪曲检测）
        features.update(self._calculate_head_features(
            nose, l_eye, r_eye, l_shoulder, r_shoulder))

        # 计算手部活动特征
        features.update(self._calculate_hand_features(
            l_shoulder, r_shoulder, l_elbow, r_elbow, l_wrist, r_wrist))

        # 计算身体姿态特征（包含坐姿标准性）
        features.update(self._calculate_body_features(l_shoulder, r_shoulder))

        # 计算身体对称性和脊柱特征
        features.update(self._calculate_posture_features(
            nose, l_shoulder, r_shoulder))
            
        # 新增：计算阅读和交互特征
        features.update(self._calculate_reading_features(
            nose, l_eye, r_eye, l_wrist, r_wrist))

        # 计算综合注意力特征
        features.update(self._calculate_attention_features(features))

        # 计算身体特征等级标签
        features.update(self._calculate_feature_grades(features))

        return features
    
    def _calculate_reading_features(
        self, 
        nose: Optional[np.ndarray],
        l_eye: Optional[np.ndarray],
        r_eye: Optional[np.ndarray],
        l_wrist: Optional[np.ndarray],
        r_wrist: Optional[np.ndarray]
    ) -> Dict[str, Optional[float]]:
        """
        计算阅读相关特征和手部交互特征
        
        参数:
            nose: 鼻子关键点
            l_eye, r_eye: 左右眼关键点
            l_wrist, r_wrist: 左右手腕关键点
            
        返回:
            Dict[str, Optional[float]]: 阅读和交互特征字典
        """
        reading_features: Dict[str, Optional[float]] = {
            'hand_to_face_distance': None,
            'eye_to_hand_angle': None,
            'reading_posture_score': None,
            'gaze_direction': None
        }
        
        # 计算手到脸的距离
        if nose is not None and (l_wrist is not None or r_wrist is not None):
            closest_wrist = l_wrist if l_wrist is not None else r_wrist
            if l_wrist is not None and r_wrist is not None:
                # 使用距离鼻子最近的手腕
                dist_left = self._get_distance(nose, l_wrist)
                dist_right = self._get_distance(nose, r_wrist)
                if dist_left is not None and dist_right is not None:
                    closest_wrist = l_wrist if dist_left < dist_right else r_wrist
            
            hand_to_face_dist = self._get_distance(nose, closest_wrist)
            reading_features['hand_to_face_distance'] = hand_to_face_dist
        
        # 计算视线方向（基于头部朝向和眼睛方向的综合）
        if l_eye is not None and r_eye is not None and nose is not None:
            eye_center = self._get_midpoint(l_eye, r_eye)
            if eye_center is not None:
                # 计算从眼睛中心到鼻子的向量
                gaze_vector = nose - eye_center
                if abs(gaze_vector[0]) > 1e-6:
                    # 计算视线水平偏转角度
                    gaze_angle = math.degrees(math.atan2(gaze_vector[1], gaze_vector[0]))
                    reading_features['gaze_direction'] = gaze_angle
        
        # 计算眼睛到手的视线角度（判断是否在看书/写字）
        if (l_eye is not None and r_eye is not None and 
            (l_wrist is not None or r_wrist is not None)):
            
            eye_center = self._get_midpoint(l_eye, r_eye)
            active_wrist = r_wrist if r_wrist is not None else l_wrist
            
            if eye_center is not None and active_wrist is not None:
                # 计算从眼睛中心到手的向量
                eye_to_hand = active_wrist - eye_center
                # 计算视线向下的向量
                downward_vector = np.array([0, 1])
                
                if np.linalg.norm(eye_to_hand) > 0:
                    # 计算视线到手的角度
                    dot_product = np.dot(eye_to_hand, downward_vector)
                    cos_angle = dot_product / (np.linalg.norm(eye_to_hand) * np.linalg.norm(downward_vector))
                    angle_rad = np.arccos(np.clip(cos_angle, -1.0, 1.0))
                    angle_deg = np.degrees(angle_rad)
                    reading_features['eye_to_hand_angle'] = angle_deg
        
        # 计算阅读姿势评分（基于头部姿态和视线）
        reading_indicators = []
        
        head_pitch = self.head_positions[-1][1] if len(self.head_positions) > 0 else None
        if head_pitch is not None:
            # 理想的阅读姿势头部俯仰角为10-40度
            if 10 <= head_pitch <= 40:
                reading_indicators.append(1.0)  # 优秀
            elif 0 <= head_pitch <= 50:
                reading_indicators.append(0.8)  # 良好
            else:
                reading_indicators.append(0.4)  # 较差
        
        if reading_features['gaze_direction'] is not None:
            # 理想的视线方向应该基本向下
            gaze_score = max(0.0, 1.0 - abs(reading_features['gaze_direction'] - 90) / 90)
            reading_indicators.append(gaze_score)
        
        if reading_indicators:
            reading_features['reading_posture_score'] = float(np.mean(reading_indicators))
        
        return reading_features

    def _calculate_head_features(
        self,
        nose: Optional[np.ndarray],
        l_eye: Optional[np.ndarray],
        r_eye: Optional[np.ndarray],
        l_shoulder: Optional[np.ndarray],
        r_shoulder: Optional[np.ndarray]
    ) -> Dict[str, Optional[float]]:
        """
        计算头部姿态特征（包含扭动和歪曲检测）

        参数:
            nose: 鼻子关键点
            l_eye, r_eye: 左右眼关键点
            l_shoulder, r_shoulder: 左右肩膀关键点

        返回:
            Dict[str, Optional[float]]: 头部特征字典
        """
        head_features: Dict[str, Optional[float]] = {
            'head_pitch': None,
            'head_yaw': None,
            'head_stability': None,
            'head_tilt': None,
            'head_twist': None,
            'head_posture_score': None
        }

        # 计算头部俯仰角（写作业时应该适度低头）- 优化为符合"三个一"标准
        if nose is not None and l_shoulder is not None and r_shoulder is not None:
            shoulder_mid = self._get_midpoint(l_shoulder, r_shoulder)
            if shoulder_mid is not None:
                # 计算头部相对于肩膀的角度
                head_vector = nose - shoulder_mid
                # 俯仰角：正值表示抬头，负值表示低头
                if abs(head_vector[1]) > 1e-6:
                    # 优化角度计算，使其更符合实际学习场景
                    vertical_distance = head_vector[1]  # 垂直距离（负值表示头部在肩膀上方）
                    horizontal_distance = abs(head_vector[0])  # 水平距离

                    # 计算俯仰角度，正值表示低头（学习状态），负值表示抬头
                    if horizontal_distance > 1e-6:
                        pitch_angle = math.degrees(math.atan2(-vertical_distance, horizontal_distance))
                        # 限制角度范围到合理区间
                        pitch_angle = max(-60, min(60, pitch_angle))
                        head_features['head_pitch'] = pitch_angle

        # 计算头部偏转角（左右转头程度）- 优化眼部朝向检测
        if nose is not None and l_eye is not None and r_eye is not None:
            # 方法1: 通过眼睛到鼻子的距离比例判断头部偏转
            dist_nose_left = self._get_distance(nose, l_eye)
            dist_nose_right = self._get_distance(nose, r_eye)

            yaw_angle_method1 = None
            if (dist_nose_left is not None and dist_nose_right is not None
                    and dist_nose_right > 1e-6 and dist_nose_left > 1e-6):
                yaw_ratio = dist_nose_left / dist_nose_right
                # 优化的角度转换公式，更敏感地检测偏转
                if yaw_ratio > 1.05:  # 向左转，降低阈值提高敏感度
                    yaw_angle_method1 = min(50, (yaw_ratio - 1) * 100)
                elif yaw_ratio < 0.95:  # 向右转
                    yaw_angle_method1 = max(-50, (yaw_ratio - 1) * 100)
                else:
                    yaw_angle_method1 = 0  # 基本正面

            # 方法2: 通过眼睛中点相对于鼻子的位置判断
            yaw_angle_method2 = None
            if l_eye is not None and r_eye is not None and nose is not None:
                eye_center = self._get_midpoint(l_eye, r_eye)
                if eye_center is not None:
                    # 计算眼睛中心到鼻子的水平偏移
                    horizontal_offset = nose[0] - eye_center[0]
                    eye_distance = self._get_distance(l_eye, r_eye)
                    if eye_distance is not None and eye_distance > 1e-6:
                        # 标准化偏移量并转换为角度
                        normalized_offset = horizontal_offset / eye_distance
                        yaw_angle_method2 = np.clip(normalized_offset * 60, -45, 45)

            # 综合两种方法的结果
            if yaw_angle_method1 is not None and yaw_angle_method2 is not None:
                # 取两种方法的加权平均，距离比例法权重更高
                head_features['head_yaw'] = 0.7 * yaw_angle_method1 + 0.3 * yaw_angle_method2
            elif yaw_angle_method1 is not None:
                head_features['head_yaw'] = yaw_angle_method1
            elif yaw_angle_method2 is not None:
                head_features['head_yaw'] = yaw_angle_method2

        # 计算头部倾斜角度（歪头程度）
        if l_eye is not None and r_eye is not None:
            # 计算眼睛连线与水平线的夹角
            eye_vector = r_eye - l_eye
            if abs(eye_vector[0]) > 1e-6:
                tilt_angle = math.degrees(
                    math.atan2(eye_vector[1], eye_vector[0]))
                # 标准化角度到 -90 到 90 度范围
                if tilt_angle > 90:
                    tilt_angle -= 180
                elif tilt_angle < -90:
                    tilt_angle += 180
                head_features['head_tilt'] = tilt_angle

        # 计算头部扭转程度（基于面部特征的不对称性）
        if nose is not None and l_eye is not None and r_eye is not None:
            # 计算鼻子到两眼的距离比例，判断头部扭转
            dist_nose_left = self._get_distance(nose, l_eye)
            dist_nose_right = self._get_distance(nose, r_eye)

            if (dist_nose_left is not None and dist_nose_right is not None
                    and dist_nose_left > 1e-6 and dist_nose_right > 1e-6):
                # 计算扭转程度（0表示正面，正值表示向左扭转，负值表示向右扭转）
                twist_ratio = (dist_nose_left - dist_nose_right) / \
                    (dist_nose_left + dist_nose_right)
                twist_angle = twist_ratio * 45  # 映射到角度范围
                head_features['head_twist'] = twist_angle

        # 计算头部稳定性（基于历史位置）
        if len(self.head_positions) >= 10:
            positions = np.array(list(self.head_positions)[-10:])
            stability = 1.0 / (1.0 + np.var(positions, axis=0).mean())
            head_features['head_stability'] = float(stability)

        # 计算头部姿态综合评分 - 优化为符合"三个一"标准的学习姿势
        head_indicators = []
        if head_features['head_pitch'] is not None:
            pitch = head_features['head_pitch']
            # 根据"三个一"标准，理想的学习俯仰角为15-30度（适度低头看书写字）
            if 15 <= pitch <= 30:  # 理想学习角度（符合眼离书本一尺的标准）
                head_indicators.append(1.0)
            elif 10 <= pitch <= 35:  # 良好范围（稍微扩大容忍度）
                head_indicators.append(0.9)
            elif 5 <= pitch <= 40:  # 可接受范围
                head_indicators.append(0.7)
            elif 0 <= pitch <= 45:  # 勉强可接受
                head_indicators.append(0.5)
            else:  # 角度过大或过小（抬头太高或低头太厉害）
                head_indicators.append(0.3)

        if head_features['head_yaw'] is not None:
            yaw = abs(head_features['head_yaw'])
            if yaw <= 15:  # 基本正面
                head_indicators.append(1.0)
            elif yaw <= 30:  # 轻微偏转
                head_indicators.append(0.7)
            else:  # 明显偏转
                head_indicators.append(0.3)

        if head_indicators:
            head_features['head_posture_score'] = float(np.mean(head_indicators))

        return head_features

    def _calculate_hand_features(
        self,
        l_shoulder: Optional[np.ndarray],
        r_shoulder: Optional[np.ndarray],
        l_elbow: Optional[np.ndarray],
        r_elbow: Optional[np.ndarray],
        l_wrist: Optional[np.ndarray],
        r_wrist: Optional[np.ndarray]
    ) -> Dict[str, Optional[float]]:
        """
        计算手部活动特征

        参数:
            l_shoulder, r_shoulder: 左右肩膀关键点
            l_elbow, r_elbow: 左右肘部关键点
            l_wrist, r_wrist: 左右手腕关键点

        返回:
            Dict[str, Optional[float]]: 手部特征字典
        """
        hand_features: Dict[str, Optional[float]] = {
            'hand_activity': None,
            'writing_posture': None,
            'hand_stability': None
        }

        # 记录手部位置历史
        active_wrist = None
        if r_wrist is not None:  # 优先使用右手（大多数人的主手）
            active_wrist = r_wrist
        elif l_wrist is not None:
            active_wrist = l_wrist

        if active_wrist is not None:
            self.hand_positions.append(active_wrist.copy())

        # 计算手部活动强度
        if len(self.hand_positions) >= 5:
            recent_positions = np.array(list(self.hand_positions)[-5:])
            movement = np.sum(np.linalg.norm(
                np.diff(recent_positions, axis=0), axis=1))

            if movement > self.movement_threshold * 2:
                hand_features['hand_activity'] = 1.0  # 高活动
            elif movement > self.movement_threshold:
                hand_features['hand_activity'] = 0.5  # 中等活动
            elif movement > 0:
                hand_features['hand_activity'] = 0.1  # 轻微活动
            else:
                hand_features['hand_activity'] = 0.0  # 静止

        # 计算写字姿势评分
        if (r_shoulder is not None and r_elbow is not None and r_wrist is not None):
            # 计算右臂肘部角度
            elbow_angle = self._calculate_angle(r_shoulder, r_elbow, r_wrist)
            if elbow_angle is not None:
                # 理想的写字肘部角度约为90-120度
                if 80 <= elbow_angle <= 130:
                    hand_features['writing_posture'] = 1.0
                else:
                    hand_features['writing_posture'] = max(
                        0.0, 1.0 - abs(elbow_angle - 105) / 50)

        # 计算手部稳定性
        if len(self.hand_positions) >= 10:
            positions = np.array(list(self.hand_positions)[-10:])
            stability = 1.0 / (1.0 + np.var(positions, axis=0).mean())
            hand_features['hand_stability'] = float(stability)

        return hand_features

    def _calculate_body_features(
        self,
        l_shoulder: Optional[np.ndarray],
        r_shoulder: Optional[np.ndarray]
    ) -> Dict[str, Optional[float]]:
        """
        计算身体姿态特征（包含坐姿标准性）

        参数:
            l_shoulder, r_shoulder: 左右肩膀关键点

        返回:
            Dict[str, Optional[float]]: 身体特征字典
        """
        body_features: Dict[str, Optional[float]] = {
            'sitting_posture': None,
            'forward_lean': None,
            'shoulder_level': None,
            'spine_straightness': None,
            'body_symmetry': None,
            'posture_score': None
        }

        if l_shoulder is not None and r_shoulder is not None:
            # 记录肩膀位置历史
            shoulder_mid = self._get_midpoint(l_shoulder, r_shoulder)
            if shoulder_mid is not None:
                self.shoulder_positions.append(shoulder_mid.copy())

            # 计算肩膀水平度 - 修复评分过低问题
            shoulder_vector = r_shoulder - l_shoulder
            if abs(shoulder_vector[0]) > 1e-6:
                shoulder_angle = math.degrees(math.atan2(
                    shoulder_vector[1], shoulder_vector[0]))
                # 理想情况下肩膀应该水平，使用更宽容的评分
                abs_angle = abs(shoulder_angle)
                if abs_angle <= 10:  # 非常水平
                    body_features['shoulder_level'] = 1.0
                elif abs_angle <= 20:  # 基本水平
                    body_features['shoulder_level'] = 0.9 - 0.2 * (abs_angle - 10) / 10
                else:  # 倾斜较大
                    body_features['shoulder_level'] = max(0.5, 0.7 - 0.2 * (abs_angle - 20) / 20)
            else:
                # 如果无法计算，给一个中等评分
                body_features['shoulder_level'] = 0.8

            # 计算脊柱直立度（基于肩膀高度的稳定性）- 修复评分过低问题
            if len(self.shoulder_positions) >= 5:
                recent_positions = np.array(list(self.shoulder_positions)[-5:])
                y_variance = np.var(recent_positions[:, 1])
                # 修复：方差可能很小导致评分过低，使用更合理的缩放
                spine_straightness = 1.0 / (1.0 + y_variance * 0.01)  # 降低方差影响
                spine_straightness = min(1.0, max(0.3, spine_straightness))  # 限制范围
                body_features['spine_straightness'] = float(spine_straightness)

            # 计算身体对称性（左右肩膀高度差）- 修复评分过低问题
            shoulder_height_diff = abs(l_shoulder[1] - r_shoulder[1])
            shoulder_width = abs(l_shoulder[0] - r_shoulder[0])
            if shoulder_width > 0:
                # 对称性评分：高度差越小越好，使用更宽容的评分
                symmetry_ratio = shoulder_height_diff / shoulder_width
                # 修复：降低惩罚系数，提高基础分
                body_features['body_symmetry'] = max(0.5, 1.0 - symmetry_ratio * 1.0)  # 降低惩罚

            # 计算坐姿评分（基于肩膀稳定性）- 修复评分过低问题
            if len(self.shoulder_positions) >= 10:
                positions = np.array(list(self.shoulder_positions)[-10:])
                position_variance = np.var(positions, axis=0).mean()
                # 修复：方差计算导致评分过低，使用更合理的缩放
                posture_stability = 1.0 / (1.0 + position_variance * 0.001)  # 大幅降低方差影响
                posture_stability = min(1.0, max(0.4, posture_stability))  # 限制范围，提高最低分
                body_features['sitting_posture'] = float(posture_stability)
            else:
                # 如果历史数据不足，给一个中等评分
                body_features['sitting_posture'] = 0.7

        # 计算坐姿综合评分
        posture_indicators = []

        if body_features['shoulder_level'] is not None:
            posture_indicators.append(body_features['shoulder_level'])

        if body_features['spine_straightness'] is not None:
            posture_indicators.append(body_features['spine_straightness'])

        if body_features['body_symmetry'] is not None:
            posture_indicators.append(body_features['body_symmetry'])

        if body_features['sitting_posture'] is not None:
            posture_indicators.append(body_features['sitting_posture'])

        if posture_indicators:
            body_features['posture_score'] = float(np.mean(posture_indicators))

        return body_features

    def _calculate_attention_features(
        self,
        features: Dict[str, Optional[float]]
    ) -> Dict[str, Optional[float]]:
        """
        计算综合注意力特征（优化版）

        参数:
            features: 已计算的特征字典

        返回:
            Dict[str, Optional[float]]: 注意力特征字典
        """
        attention_features: Dict[str, Optional[float]] = {
            'focus_score': None,
            'distraction_level': None
        }

        # 计算综合专注度评分 - 大幅提高注意力权重，降低坐姿影响
        focus_indicators = []
        weights = []

        # 头部俯仰角贡献（适度低头表示专注）- 最高权重，直接反映学习状态
        head_pitch = features.get('head_pitch')
        if head_pitch is not None:
            # 基于"三个一"标准的学习低头角度评分
            if 15 <= head_pitch <= 30:  # 理想学习角度
                pitch_score = 1.0
            elif 10 <= head_pitch <= 35:  # 良好范围
                pitch_score = 0.9
            elif 5 <= head_pitch <= 40:  # 可接受范围
                pitch_score = 0.7
            elif 0 <= head_pitch <= 45:  # 勉强可接受
                pitch_score = 0.5
            elif -10 <= head_pitch <= 50:  # 较差但不算分心
                pitch_score = 0.4
            else:  # 明显不合适的角度
                pitch_score = 0.2

            focus_indicators.append(pitch_score)
            weights.append(0.5)  # 提高到50% 权重 - 头部朝向是最重要的注意力指标

        # 头部左右偏转贡献（东张西望检测）- 降低权重，更宽容的标准
        head_yaw = features.get('head_yaw')
        if head_yaw is not None:
            # 更宽容的偏转角度评分，避免误判正常的看书角度
            if abs(head_yaw) <= 20:  # 正常范围（看书时可能有轻微偏转）
                yaw_score = 1.0
            elif abs(head_yaw) <= 35:  # 轻微偏转，仍可接受
                yaw_score = 0.9 - 0.1 * (abs(head_yaw) - 20) / 15
            elif abs(head_yaw) <= 50:  # 中等偏转
                yaw_score = 0.8 - 0.2 * (abs(head_yaw) - 35) / 15
            elif abs(head_yaw) <= 70:  # 明显偏转
                yaw_score = 0.6 - 0.3 * (abs(head_yaw) - 50) / 20
            else:  # 严重偏转（明显东张西望）
                yaw_score = max(0.3, 0.3 - 0.2 * (abs(head_yaw) - 70) / 30)

            focus_indicators.append(yaw_score)
            weights.append(0.2)  # 降低到20% 权重 - 避免过度影响专注度判断

        # 头部稳定性贡献 - 中等权重，反映专注的持续性
        head_stability = features.get('head_stability')
        if head_stability is not None:
            # 头部稳定性直接反映注意力集中程度
            stability_score = min(1.0, head_stability + 0.1)  # 轻微提升基础分
            focus_indicators.append(stability_score)
            weights.append(0.15)  # 15% 权重

        # 手部活动贡献 - 大幅降低权重，专注度主要看头部和眼神
        hand_activity = features.get('hand_activity')
        if hand_activity is not None:
            if hand_activity > 0.3:  # 正常写字活动
                activity_score = 1.0
            elif hand_activity > 0:  # 轻微活动
                activity_score = 0.9
            else:  # 静止 - 可能在思考或阅读，不应扣分太多
                activity_score = 0.8  # 静止状态给较高评分

            focus_indicators.append(activity_score)
            weights.append(0.05)  # 降低到5% 权重 - 手部活动不是专注度的主要指标

        # 坐姿特征 - 最低权重，仅作为辅助参考，不影响专注度主要判断
        # 专注度应该主要基于注意力方向（头部朝向），而不是坐姿标准性
        # 完全移除坐姿对专注度的影响，专注度只看注意力相关指标

        # 计算加权平均评分
        if focus_indicators and weights:
            # 确保权重总和为1
            weights = np.array(weights)
            weights = weights / weights.sum()

            weighted_score = np.average(focus_indicators, weights=weights)

            # 应用平滑函数，避免极端低分
            final_score = max(0.1, min(1.0, weighted_score))

            attention_features['focus_score'] = float(final_score)
            attention_features['distraction_level'] = 1.0 - final_score

        return attention_features

    def _classify_study_attention(
        self,
        features: Dict[str, Optional[float]]
    ) -> Tuple[str, List[str]]:
        """
        Classify the attention state from study-behaviour features.

        Attention-state classifier tuned for children doing homework.
        Decision priority:
        1. sleeping/tired: prolonged stillness, drooping head
        2. focus-score ladder: very high scores short-circuit straight to
           "focused"; lower bands are combined with the distraction
           indicators collected below
        3. distraction indicators: looking around (yaw), head raised too
           high (pitch), abnormal hand activity, unstable head

        Parameters:
            features: study-behaviour feature dict

        Returns:
            Tuple[str, List[str]]: (attention-state label, detail reasons)
        """
        reasons: List[str] = []

        # Key features; each may be None when not computable this frame.
        focus_score = features.get('focus_score', 0.0)
        head_pitch = features.get('head_pitch')
        head_yaw = features.get('head_yaw')
        hand_activity = features.get('hand_activity')
        head_stability = features.get('head_stability')

        # Sleep detection has the highest priority; only reported once the
        # static streak exceeds stability_threshold frames.
        if self._detect_sleeping(features):
            self.static_frame_count += 1
            if self.static_frame_count > self.stability_threshold:
                reasons.append(f"长时间静止 ({self.static_frame_count}帧)")
                reasons.append("可能睡着了")
                return "睡觉/疲劳", reasons
        else:
            self.static_frame_count = 0

        # Distraction evidence is split into severe vs. mild indicators;
        # head/eye orientation is weighted most heavily.
        severe_distraction_indicators = []  # severe distraction evidence
        mild_distraction_indicators = []    # mild distraction evidence

        # Left/right head turning — the strongest distraction signal.
        if head_yaw is not None:
            if abs(head_yaw) > 40:  # large deflection: clearly looking around
                severe_distraction_indicators.append(f"明显东张西望 ({head_yaw:.1f}°)")
            elif abs(head_yaw) > 25:  # medium deflection: attention drifting
                mild_distraction_indicators.append(f"视线偏移 ({head_yaw:.1f}°)")
            elif abs(head_yaw) > 15:  # slight deflection: only flag if sustained
                # NOTE(review): the counter is created lazily here, so the very
                # first slight deflection takes the `else` branch below; it is
                # also only reset when |yaw| <= 15, never in the >25 branches —
                # confirm that is intended.
                if hasattr(self, '_consecutive_yaw_count'):
                    self._consecutive_yaw_count += 1
                    if self._consecutive_yaw_count > 5:  # 5+ consecutive frames
                        mild_distraction_indicators.append(f"持续视线偏移 ({head_yaw:.1f}°)")
                else:
                    self._consecutive_yaw_count = 1
            else:
                self._consecutive_yaw_count = 0

        # Excessive head raising — lenient, since it may be normal thinking.
        if head_pitch is not None:
            if head_pitch > 70:  # severe: clearly not studying
                severe_distraction_indicators.append(f"头部抬起过高 ({head_pitch:.1f}°)")
            elif head_pitch > 55:  # moderate: possibly looking elsewhere
                mild_distraction_indicators.append(f"可能在看别处 ({head_pitch:.1f}°)")
            elif head_pitch < -15:  # head drooping: possible fatigue
                mild_distraction_indicators.append(f"头部过度下垂 ({head_pitch:.1f}°)")

        # Hand-activity anomaly, downgraded to a mild indicator.
        # NOTE(review): hand_activity is produced in [0, 1] elsewhere in this
        # class, so `< -0.7` can never be true — this branch looks dead;
        # confirm the intended threshold.
        if hand_activity is not None and hand_activity < -0.7:  # stricter threshold
            mild_distraction_indicators.append("手部活动异常")  # downgraded to mild

        # Head stability — frequent shaking is a clear distraction sign.
        if head_stability is not None:
            if head_stability < 0.15:  # very unstable
                severe_distraction_indicators.append("头部晃动剧烈")
            elif head_stability < 0.25:  # fairly unstable
                mild_distraction_indicators.append("头部晃动频繁")

        # Focus-score-first classification (avoids state flapping).
        if focus_score is not None:
            # Very high focus: classify as focused regardless of other signals.
            if focus_score >= 0.9:  # 90+ points
                reasons.append(f"专注度极高 ({int(focus_score*100)}分)")
                self._add_positive_indicators(reasons, features)
                # Reset the distraction counters.
                self.distraction_count = 0
                self._consecutive_yaw_count = 0
                return "专注学习", reasons

            # High focus: still treated as focused; counters reset.
            elif focus_score >= 0.7:  # 70+ points
                reasons.append(f"专注度高 ({int(focus_score*100)}分)")
                self._add_positive_indicators(reasons, features)
                # Reset the distraction counters.
                self.distraction_count = 0
                self._consecutive_yaw_count = 0
                # Even with mild indicators, prefer "focused"; severe ones only
                # demote to "basically focused", never to "distracted".
                if severe_distraction_indicators:
                    reasons.extend(severe_distraction_indicators)
                    return "基本专注", reasons  # demoted, but not "distracted"
                return "专注学习", reasons

            # Medium focus: combine with the distraction indicators.
            elif focus_score >= 0.45:  # 45+ points
                reasons.append(f"专注度中等 ({int(focus_score*100)}分)")

                # Two or more severe indicators → clearly distracted.
                if len(severe_distraction_indicators) >= 2:
                    self.distraction_count += 1
                    reasons.extend(severe_distraction_indicators)
                    if self.distraction_count > 15:  # raised streak threshold
                        return "明显分心", reasons
                    else:
                        return "轻微分心", reasons
                elif len(severe_distraction_indicators) >= 1 or len(mild_distraction_indicators) >= 2:
                    self.distraction_count += 1
                    reasons.extend(severe_distraction_indicators + mild_distraction_indicators)
                    # A low running count still means only mild distraction.
                    if self.distraction_count <= 5:
                        return "轻微分心", reasons
                    else:
                        return "明显分心", reasons
                else:
                    # No clear distraction evidence: recover quickly.
                    self._add_mixed_indicators(reasons, features)
                    self.distraction_count = max(0, self.distraction_count - 2)  # fast recovery
                    self._consecutive_yaw_count = 0
                    return "基本专注", reasons

            # Low focus.
            elif focus_score >= 0.3:  # 30+ points
                reasons.append(f"专注度较低 ({int(focus_score*100)}分)")
                if severe_distraction_indicators:
                    self.distraction_count += 1
                    reasons.extend(severe_distraction_indicators)
                    return "明显分心", reasons
                else:
                    return "注意力不集中", reasons

            # Very low focus.
            else:
                reasons.append(f"专注度很低 ({int(focus_score*100)}分)")
                self.distraction_count += 1
                return "严重分心", reasons

        # Key features missing.
        reasons.append("特征信息不足")
        return "无法判断", reasons

    def _detect_sleeping(self, features: Dict[str, Optional[float]]) -> bool:
        """
        检测是否处于睡觉状态

        参数:
            features: 特征字典

        返回:
            bool: 是否可能在睡觉
        """
        head_pitch = features.get('head_pitch')
        hand_activity = features.get('hand_activity')
        head_stability = features.get('head_stability')

        sleep_indicators = 0

        # 头部过度下垂
        if head_pitch is not None and head_pitch < -10:
            sleep_indicators += 1

        # 手部完全静止
        if hand_activity is not None and hand_activity == 0:
            sleep_indicators += 1

        # 头部非常稳定（不动）
        if head_stability is not None and head_stability > 0.9:
            sleep_indicators += 1

        return sleep_indicators >= 2

    def _add_positive_indicators(
        self, reasons: List[str], features: Dict[str, Optional[float]]
    ) -> None:
        """添加积极的学习指标"""
        head_pitch = features.get('head_pitch')
        if head_pitch is not None and 10 <= head_pitch <= 30:
            reasons.append("头部姿态良好")

        writing_posture = features.get('writing_posture')
        if writing_posture is not None and writing_posture > 0.7:
            reasons.append("写字姿势正确")

        hand_activity = features.get('hand_activity')
        if hand_activity is not None and hand_activity > 0:
            reasons.append("手部活动正常")

    def _add_mixed_indicators(
        self, reasons: List[str], features: Dict[str, Optional[float]]
    ) -> None:
        """添加中等状态的指标"""
        head_yaw = features.get('head_yaw')
        if head_yaw is not None and abs(head_yaw) > 10:
            reasons.append("偶尔转头")

        hand_stability = features.get('hand_stability')
        if hand_stability is not None and hand_stability < 0.5:
            reasons.append("手部稍有不稳")

    def process_frame(
        self, frame: np.ndarray
    ) -> Tuple[str, List[str], Dict[str, Any], Dict[str, Optional[float]]]:
        """
        Process one video frame and detect the child's study-attention state.

        Main entry point: runs pose extraction, study-feature computation,
        optional object detection, allowed-behaviour detection and the
        context-aware attention classification, tuned for a desk-lamp
        homework scenario.

        Parameters:
            frame: input image frame

        Returns:
            Tuple[str, List[str], Dict[str, Any], Dict[str, Optional[float]]]:
                - attention-state string (focused / distracted / sleeping, ...)
                - list of detail reasons (incl. object / behaviour analysis)
                - keypoint data for visualisation (incl. object detections)
                - study-behaviour feature dict (incl. the body features)
        """
        scaled_kpts, confs, bbox, person_count = self._extract_keypoints(frame)

        keypoints_data_for_drawing: Dict[str, Any] = {
            'kpts': scaled_kpts,
            'confs': confs,
            'bbox': bbox,
            'person_count': person_count,
            'detected_objects': [],
            'object_analysis': {},
            'allowed_behaviors': []
        }

        if scaled_kpts is None or confs is None:
            # No person detected: still run object detection, but without body
            # keypoints no spatial filtering is possible, so keep only
            # obviously distracting items.
            if self.enable_object_detection:
                detected_objects = self._detect_objects(frame)
                distraction_objects = []
                for obj in detected_objects:
                    distraction_keywords = ['phone', 'mobile', 'cell', 'tablet', 'game']
                    if any(keyword in obj['class_name'].lower() for keyword in distraction_keywords):
                        distraction_objects.append(obj)

                object_analysis = self._analyze_detected_objects(distraction_objects)
                keypoints_data_for_drawing['detected_objects'] = distraction_objects
                keypoints_data_for_drawing['object_analysis'] = object_analysis

                # Forbidden items present even without a person → report them.
                if object_analysis['forbidden_objects']:
                    warnings = object_analysis['warnings']
                    return "物品分心", warnings, keypoints_data_for_drawing, {}

            return "未检测到人", [], keypoints_data_for_drawing, {}

        # Compute the study-behaviour features from the keypoints.
        features = self._calculate_study_features(scaled_kpts, confs)

        # Object detection, filtered by spatial interaction with the body.
        detected_objects = []
        object_analysis = {}
        if self.enable_object_detection:
            # Detect everything, then keep only items the child interacts with.
            all_detected_objects = self._detect_objects(frame)
            detected_objects = self._filter_objects_by_interaction(all_detected_objects, scaled_kpts)

            # Preliminary "currently focused" check (high focus score,
            # moderate head-down, roughly frontal gaze) used to soften the
            # object-analysis warnings while the child is clearly studying.
            focus_score = features.get('focus_score', 0.0)
            head_pitch = features.get('head_pitch', 0)
            head_yaw = features.get('head_yaw', 0)
            is_studying_focused = (
                focus_score is not None and focus_score > 0.7 and  # high focus
                head_pitch is not None and 10 <= head_pitch <= 40 and  # moderate head-down
                head_yaw is not None and abs(head_yaw) <= 15  # roughly frontal gaze
            )

            # Analyse the filtered objects with the study state in mind.
            object_analysis = self._analyze_detected_objects(detected_objects, is_studying_focused)
            keypoints_data_for_drawing['detected_objects'] = detected_objects
            keypoints_data_for_drawing['object_analysis'] = object_analysis

        # Detect explicitly-allowed behaviours (drinking, stretching, ...).
        allowed_behaviors = self._detect_allowed_behaviors(features)
        keypoints_data_for_drawing['allowed_behaviors'] = allowed_behaviors

        # Context-aware classification (objects, behaviours, person count).
        attention_status, reasons = self._classify_study_attention_with_context(
            features, object_analysis, allowed_behaviors, person_count)

        return attention_status, reasons, keypoints_data_for_drawing, features

    def _classify_study_attention_with_context(
        self,
        features: Dict[str, Optional[float]],
        object_analysis: Dict[str, Any],
        allowed_behaviors: List[Dict[str, Any]],
        person_count: int = 1
    ) -> Tuple[str, List[str]]:
        """
        Classify the attention state from study-behavior features plus context.

        Decision priority (first matching rule returns):
        1. Focus score >= 0.9 -> "专注学习" regardless of any distraction.
        2. More than one person detected -> grade by focus score (only when a
           focus score exists; with no score this falls through to the rules
           below).
        3. Actively used forbidden objects -> "严重分心".
        4. Forbidden objects that are merely present -> "基本专注" when focus
           is high (>= 0.7), "轻微分心" otherwise.
        5. A recognized allowed behavior (drinking, stretching, ...) ->
           "基本专注" while within its time budget, "轻微分心" past it.
        6. Otherwise: apply a small desktop-clutter penalty to the focus
           score and delegate to the base classifier.

        Side effects: may overwrite features['focus_score'] (clutter penalty)
        and records behavior start times in self.allowed_behavior_start_time.

        Parameters:
            features: study-behavior feature dict
            object_analysis: object analysis result
            allowed_behaviors: list of detected allowed behaviors
            person_count: number of people detected

        Returns:
            Tuple[str, List[str]]: (attention state, detailed reason list)
        """
        reasons: List[str] = []

        # A very high focus score wins outright.
        focus_score = features.get('focus_score', 0.0)
        if focus_score is not None and focus_score >= 0.9:  # 90+ points
            reasons.append(f"专注度极高 ({int(focus_score*100)}分)")
            # Even with other distractions present, treat as focused.
            return "专注学习", reasons

        # Multiple people in frame take priority as an interference source.
        if person_count > 1:
            reasons.append(f"检测到{person_count}个人")
            reasons.append("可能有人干扰学习")

            # With bystanders present, downgrade even a decent focus score.
            if focus_score is not None:
                if focus_score >= 0.8:  # 80+, bar raised because of interference
                    reasons.append(f"专注度很高 ({int(focus_score*100)}分)，但环境有干扰")
                    return "基本专注", reasons  # do not downgrade too far
                elif focus_score >= 0.6:  # 60+ but interfered with
                    reasons.append(f"专注度尚可 ({int(focus_score*100)}分)，但环境有干扰")
                    return "轻微分心", reasons
                elif focus_score >= 0.4:  # 40+
                    reasons.append(f"专注度一般 ({int(focus_score*100)}分)，且环境有干扰")
                    return "明显分心", reasons
                else:
                    reasons.append(f"专注度较低 ({int(focus_score*100)}分)，且环境有干扰")
                    return "严重分心", reasons

        # Forbidden objects actively in use mean severe distraction.
        active_distractions = object_analysis.get('active_distractions', [])
        if active_distractions:
            reasons.extend(object_analysis.get('warnings', []))
            return "严重分心", reasons

        # Forbidden objects that are present but not actively used.
        forbidden_objects = object_analysis.get('forbidden_objects', [])
        if forbidden_objects:
            # Separate passive presence from active use.
            passive_forbidden = [obj for obj in forbidden_objects
                               if not obj.get('is_actively_using', False)]

            if len(passive_forbidden) > 0:
                # Severity of passive items depends on the focus score.
                if focus_score is not None and focus_score >= 0.7:
                    # While focused, merely-present items matter little.
                    reasons.extend(object_analysis.get('suggestions', []))
                    return "基本专注", reasons
                else:
                    # When not focused, passive presence still distracts.
                    reasons.extend(object_analysis.get('warnings', []))
                    return "轻微分心", reasons

        # Check whether an allowed behavior is in progress.
        if allowed_behaviors:
            behavior_names = [behavior['name']
                              for behavior in allowed_behaviors]
            reasons.append(f"正在进行: {', '.join(behavior_names)}")

            # Short allowed behaviors get a lenient judgment.
            # NOTE(review): only the first behavior in the list is ever
            # evaluated — every branch below returns on the first iteration.
            current_time = time.time()
            for behavior in allowed_behaviors:
                behavior_type = behavior['type']
                if behavior_type in self.allowed_behavior_start_time:
                    duration = current_time - \
                        self.allowed_behavior_start_time[behavior_type]
                    max_duration = behavior.get('max_duration', 30)

                    if duration <= max_duration:
                        # Within the allowed window: basically focused.
                        reasons.append(
                            f"{behavior['description']}（{duration:.0f}秒）")
                        return "基本专注", reasons
                    else:
                        # Over the allowed window: nudge back to studying.
                        reasons.append(f"{behavior['name']}时间过长，建议回到学习")
                        return "轻微分心", reasons
                else:
                    # First sighting: record when the behavior started.
                    self.allowed_behavior_start_time[behavior_type] = current_time
                    reasons.append(f"开始{behavior['description']}")
                    return "基本专注", reasons

        # Attach any object-related suggestions.
        if object_analysis.get('suggestions'):
            reasons.extend(object_analysis['suggestions'])

        # Factor desktop clutter into the focus score.
        desktop_clutter_score = object_analysis.get('desktop_clutter_score', 0.0)
        if desktop_clutter_score > 0:
            # Nudge the score down slightly without over-penalizing.
            original_focus_score = features.get('focus_score', 0.0)
            if original_focus_score is not None:
                # Clutter costs at most 10% of the focus score.
                clutter_penalty = min(0.1, desktop_clutter_score * 0.3)
                adjusted_focus_score = max(0.0, original_focus_score - clutter_penalty)
                features['focus_score'] = adjusted_focus_score

                if desktop_clutter_score > 0.2:
                    reasons.append(f"桌面整洁度影响专注度(-{int(clutter_penalty*100)}分)")

        # Delegate to the base attention classifier for the final verdict.
        base_status, base_reasons = self._classify_study_attention(features)
        reasons.extend(base_reasons)

        return base_status, reasons

    def reset_history(self) -> None:
        """
        重置历史数据

        在开始新的检测会话时调用，清除之前的历史数据。
        """
        self.head_positions.clear()
        self.hand_positions.clear()
        self.shoulder_positions.clear()
        self.attention_history.clear()
        self.static_frame_count = 0
        self.distraction_count = 0
        self._consecutive_yaw_count = 0  # 重置连续偏转计数
        self.last_movement_time = time.time()

        # 清理行为记录
        self.allowed_behavior_start_time.clear()
        self.forbidden_object_warnings.clear()

    def get_attention_summary(self, window_size: int = 30) -> Dict[str, Any]:
        """
        获取注意力状态摘要

        参数:
            window_size: 统计窗口大小（帧数）

        返回:
            Dict[str, Any]: 注意力状态统计摘要
        """
        if len(self.attention_history) < window_size:
            return {"insufficient_data": True}

        recent_states = list(self.attention_history)[-window_size:]

        # 统计各状态出现次数
        state_counts = {}
        for state in recent_states:
            state_counts[state] = state_counts.get(state, 0) + 1

        # 计算专注度百分比
        focused_states = ["专注学习", "基本专注"]
        focused_count = sum(state_counts.get(state, 0)
                            for state in focused_states)
        focus_percentage = (focused_count / len(recent_states)) * 100

        return {
            "focus_percentage": focus_percentage,
            "state_distribution": state_counts,
            "total_frames": len(recent_states),
            "current_trend": recent_states[-1] if recent_states else "未知"
        }

    def _calculate_posture_features(
        self,
        nose: Optional[np.ndarray],
        l_shoulder: Optional[np.ndarray],
        r_shoulder: Optional[np.ndarray]
    ) -> Dict[str, Optional[float]]:
        """
        计算身体对称性和脊柱特征

        参数:
            nose: 鼻子关键点
            l_eye, r_eye: 左右眼关键点
            l_shoulder, r_shoulder: 左右肩膀关键点

        返回:
            Dict[str, Optional[float]]: 姿态特征字典
        """
        posture_features: Dict[str, Optional[float]] = {}

        # 计算整体身体中轴线的直立度
        if nose is not None and l_shoulder is not None and r_shoulder is not None:
            # 计算肩膀中点
            shoulder_mid = self._get_midpoint(l_shoulder, r_shoulder)
            if shoulder_mid is not None:
                # 计算头部到肩膀中点的向量，评估身体中轴线
                body_axis = nose - shoulder_mid
                if abs(body_axis[0]) > 1e-6:
                    # 计算身体中轴线与垂直线的夹角
                    axis_angle = math.degrees(math.atan2(
                        abs(body_axis[0]), abs(body_axis[1])))
                    # 理想情况下应该接近0度（垂直）
                    axis_straightness = max(0.0, 1.0 - axis_angle / 30)
                    posture_features['body_axis_straightness'] = axis_straightness

        return posture_features

    def _calculate_feature_grades(
        self, features: Dict[str, Optional[float]]
    ) -> Dict[str, Optional[float]]:
        """
        计算身体特征等级标签

        参数:
            features: 已计算的特征字典

        返回:
            Dict[str, Optional[float]]: 特征等级字典
        """
        grades: Dict[str, Optional[float]] = {
            'posture_grade': None,    # 坐姿等级（0-4）
            'head_grade': None,       # 头部姿态等级（0-4）
            'symmetry_grade': None    # 身体对称性等级（0-4）
        }

        # 计算坐姿等级
        posture_score = features.get('posture_score')
        if posture_score is not None:
            if posture_score >= 0.85:
                grades['posture_grade'] = 4.0  # 优秀
            elif posture_score >= 0.70:
                grades['posture_grade'] = 3.0  # 良好
            elif posture_score >= 0.55:
                grades['posture_grade'] = 2.0  # 一般
            elif posture_score >= 0.40:
                grades['posture_grade'] = 1.0  # 较差
            else:
                grades['posture_grade'] = 0.0  # 很差

        # 计算头部姿态等级
        head_posture_score = features.get('head_posture_score')
        if head_posture_score is not None:
            if head_posture_score >= 0.85:
                grades['head_grade'] = 4.0  # 优秀
            elif head_posture_score >= 0.70:
                grades['head_grade'] = 3.0  # 良好
            elif head_posture_score >= 0.55:
                grades['head_grade'] = 2.0  # 一般
            elif head_posture_score >= 0.40:
                grades['head_grade'] = 1.0  # 较差
            else:
                grades['head_grade'] = 0.0  # 很差

        # 计算身体对称性等级
        body_symmetry = features.get('body_symmetry')
        if body_symmetry is not None:
            if body_symmetry >= 0.90:
                grades['symmetry_grade'] = 4.0  # 优秀
            elif body_symmetry >= 0.75:
                grades['symmetry_grade'] = 3.0  # 良好
            elif body_symmetry >= 0.60:
                grades['symmetry_grade'] = 2.0  # 一般
            elif body_symmetry >= 0.45:
                grades['symmetry_grade'] = 1.0  # 较差
            else:
                grades['symmetry_grade'] = 0.0  # 很差

        return grades

    def get_posture_labels(self, features: Dict[str, Optional[float]]) -> Dict[str, str]:
        """
        Build human-readable labels for the body-feature grades.

        Parameters:
            features: feature dict (may contain grade values and head angles)

        Returns:
            Dict[str, str]: label dict; only keys whose source feature is
            present in `features` are included.
        """
        grade_to_text = {
            4.0: "优秀", 3.0: "良好", 2.0: "一般", 1.0: "较差", 0.0: "很差"
        }

        labels: Dict[str, str] = {}

        # The three graded features share one lookup table.
        for grade_key, label_key in (
            ('posture_grade', 'posture_label'),
            ('head_grade', 'head_label'),
            ('symmetry_grade', 'symmetry_label'),
        ):
            grade = features.get(grade_key)
            if grade is not None:
                labels[label_key] = grade_to_text.get(grade, "未知")

        # Head tilt: classified by the magnitude of the angle.
        tilt = features.get('head_tilt')
        if tilt is not None:
            magnitude = abs(tilt)
            if magnitude < 5:
                labels['head_tilt_status'] = "头部端正"
            elif magnitude < 15:
                labels['head_tilt_status'] = "轻微歪头"
            else:
                labels['head_tilt_status'] = "明显歪头"

        # Head twist (yaw): same thresholds as tilt.
        twist = features.get('head_twist')
        if twist is not None:
            magnitude = abs(twist)
            if magnitude < 5:
                labels['head_twist_status'] = "头部正面"
            elif magnitude < 15:
                labels['head_twist_status'] = "轻微扭头"
            else:
                labels['head_twist_status'] = "明显扭头"

        return labels

    def _detect_objects(self, frame: np.ndarray) -> List[Dict[str, Any]]:
        """
        检测图像中的物品（排除人物）

        参数:
            frame: 输入图像帧

        返回:
            List[Dict[str, Any]]: 检测到的物品列表
        """
        detected_objects = []

        if not self.enable_object_detection or self.object_model is None:
            return detected_objects

        try:
            results = self.object_model(frame, verbose=False)

            if results and len(results) > 0:
                result = results[0]

                if hasattr(result, 'boxes') and result.boxes is not None:
                    boxes = result.boxes

                    for i in range(len(boxes)):
                        # 获取边界框
                        if hasattr(boxes, 'xyxy') and len(boxes.xyxy) > i:
                            bbox = boxes.xyxy[i].cpu().numpy()
                        else:
                            continue

                        # 获取置信度
                        if hasattr(boxes, 'conf') and len(boxes.conf) > i:
                            confidence = float(boxes.conf[i].cpu().numpy())
                        else:
                            confidence = 0.0

                        # 获取类别
                        if hasattr(boxes, 'cls') and len(boxes.cls) > i:
                            class_id = int(boxes.cls[i].cpu().numpy())
                            class_name = self.object_model.names.get(
                                class_id, f"class_{class_id}")
                        else:
                            continue

                        # 过滤掉人物类别和低置信度检测
                        excluded_classes = [
                            'person', 'people', 'human', 'man', 'woman', 'child', 'boy', 'girl',
                            'face', 'head', 'hand', 'arm', 'leg', 'body'  # 人体部位
                        ]

                        # 检测学习相关和分心相关的物品
                        relevant_keywords = [
                            # 学习用品
                            'bottle', 'cup', 'book', 'pen', 'pencil', 'eraser', 'ruler',
                            'calculator', 'notebook', 'paper', 'desk', 'chair', 'lamp',
                            'mouse', 'keyboard',
                            # 分心物品 - 重点检测
                            'phone', 'mobile', 'cell', 'tablet', 'laptop', 'computer',
                            'toy', 'game', 'headphones', 'earphones', 'headset'
                        ]

                        # 对于容易误检的物品，提高置信度阈值
                        confidence_threshold = 0.3
                        if any(keyword in class_name.lower() for keyword in ['keyboard', 'mouse', 'laptop', 'computer']):
                            confidence_threshold = 0.6  # 电子设备需要更高置信度
                        elif any(keyword in class_name.lower() for keyword in ['phone', 'cell', 'mobile']):
                            confidence_threshold = 0.7  # 手机检测需要很高置信度

                        if (confidence > confidence_threshold and
                            class_name.lower() not in excluded_classes):

                            # 检查是否包含相关关键词
                            is_relevant = any(keyword in class_name.lower() for keyword in relevant_keywords)

                            # 特别处理手机检测 - 多种可能的类别名称
                            phone_keywords = ['phone', 'mobile', 'cell', 'iphone', 'android', 'smartphone']
                            is_phone = any(phone_word in class_name.lower() for phone_word in phone_keywords)

                            if is_relevant or is_phone:
                                # 计算物品中心点和面积
                                center_x = (bbox[0] + bbox[2]) / 2
                                center_y = (bbox[1] + bbox[3]) / 2
                                area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])

                                detected_objects.append({
                                    'class_name': class_name,
                                    'confidence': confidence,
                                    'bbox': bbox,
                                    'class_id': class_id,
                                    'center': [center_x, center_y],
                                    'area': area
                                })

                                # 调试信息：打印检测到的物品
                                print(f"检测到物品: {class_name} (置信度: {confidence:.2f})")

        except Exception as e:
            print(f"物品检测过程中出现错误: {e}")

        return detected_objects

    def _filter_objects_by_interaction(
        self,
        detected_objects: List[Dict[str, Any]],
        person_keypoints: Optional[np.ndarray] = None
    ) -> List[Dict[str, Any]]:
        """
        根据与人体的空间关系过滤物品，增强视线朝向和手部交互检测

        只保留在学习区域内或与人体有交互可能的物品，并检测视线朝向和手部交互

        参数:
            detected_objects: 检测到的物品列表
            person_keypoints: 人体关键点坐标

        返回:
            List[Dict[str, Any]]: 过滤后的物品列表，包含交互分析
        """
        if not detected_objects or person_keypoints is None:
            return detected_objects

        filtered_objects = []

        # 计算人体的活动区域
        valid_keypoints = []
        for i, kpt in enumerate(person_keypoints):
            if i < len(COCO_KP_NAMES) and kpt is not None:
                # 检查关键点是否有效（不是零点且在合理范围内）
                if len(kpt) >= 2 and kpt[0] > 0 and kpt[1] > 0:
                    valid_keypoints.append(kpt)

        if len(valid_keypoints) < 3:  # 至少需要3个有效关键点
            return detected_objects  # 无法确定人体区域，返回所有物品

        # 获取关键点位置用于交互检测
        def get_kp(name: str) -> Optional[np.ndarray]:
            """获取指定关键点"""
            idx = self.kp_idx.get(name)
            if idx is not None and idx < len(person_keypoints):
                kpt = person_keypoints[idx]
                if kpt is not None and len(kpt) >= 2 and kpt[0] > 0 and kpt[1] > 0:
                    return kpt
            return None

        # 获取关键关键点
        nose = get_kp('nose')
        l_eye, r_eye = get_kp('left_eye'), get_kp('right_eye')
        l_wrist, r_wrist = get_kp('left_wrist'), get_kp('right_wrist')

        # 计算人体边界框
        valid_keypoints = np.array(valid_keypoints)
        person_bbox = [
            np.min(valid_keypoints[:, 0]),  # x_min
            np.min(valid_keypoints[:, 1]),  # y_min
            np.max(valid_keypoints[:, 0]),  # x_max
            np.max(valid_keypoints[:, 1])   # y_max
        ]

        # 扩展学习交互区域（人体周围的合理范围）
        person_width = person_bbox[2] - person_bbox[0]
        person_height = person_bbox[3] - person_bbox[1]

        # 学习区域：人体前方和两侧的扩展区域
        interaction_zone = [
            person_bbox[0] - person_width * 0.5,   # 左侧扩展50%
            person_bbox[1] - person_height * 0.2,  # 上方扩展20%
            person_bbox[2] + person_width * 0.5,   # 右侧扩展50%
            person_bbox[3] + person_height * 0.3   # 下方扩展30%（桌面区域）
        ]

        for obj in detected_objects:
            obj_center = obj.get('center', [0, 0])
            obj_bbox = obj.get('bbox', [0, 0, 0, 0])

            # 检查物品是否在交互区域内
            is_in_interaction_zone = (
                interaction_zone[0] <= obj_center[0] <= interaction_zone[2] and
                interaction_zone[1] <= obj_center[1] <= interaction_zone[3]
            )

            # 检查物品是否与人体有重叠（手持物品）
            person_bbox_expanded = [
                person_bbox[0] - 20, person_bbox[1] - 20,
                person_bbox[2] + 20, person_bbox[3] + 20
            ]

            has_overlap = not (
                obj_bbox[2] < person_bbox_expanded[0] or  # 物品在人体左侧
                obj_bbox[0] > person_bbox_expanded[2] or  # 物品在人体右侧
                obj_bbox[3] < person_bbox_expanded[1] or  # 物品在人体上方
                obj_bbox[1] > person_bbox_expanded[3]     # 物品在人体下方
            )

            # 特殊处理：某些物品即使远离也要检测（如手机等分心物品）
            distraction_objects = ['phone', 'mobile', 'cell', 'tablet', 'game', 'keyboard', 'mouse']
            is_distraction = any(dist_obj in obj['class_name'].lower()
                               for dist_obj in distraction_objects)

            # 新增：检测视线朝向物品（孩子是否在看这个物品）
            is_looking_at_object = self._detect_gaze_towards_object(
                obj_center, nose, l_eye, r_eye)

            # 新增：检测手部与物品交互（孩子是否在操作这个物品）
            hand_interaction_level = self._detect_hand_object_interaction(
                obj_center, obj_bbox, l_wrist, r_wrist)

            # 保留条件：在交互区域内 OR 与人体有重叠 OR 是分心物品
            if is_in_interaction_zone or has_overlap or is_distraction:
                # 添加增强的交互信息
                obj['interaction_info'] = {
                    'in_interaction_zone': is_in_interaction_zone,
                    'has_overlap': has_overlap,
                    'is_distraction': is_distraction,
                    'distance_to_person': self._calculate_distance_to_person(obj_center, person_bbox),
                    # 新增交互检测结果
                    'is_looking_at': is_looking_at_object,
                    'hand_interaction_level': hand_interaction_level,
                    'is_actively_using': hand_interaction_level > 0.5 or is_looking_at_object
                }
                filtered_objects.append(obj)

        return filtered_objects

    def _calculate_distance_to_person(self, obj_center: List[float], person_bbox: List[float]) -> float:
        """计算物品中心到人体边界框的最短距离"""
        person_center_x = (person_bbox[0] + person_bbox[2]) / 2
        person_center_y = (person_bbox[1] + person_bbox[3]) / 2

        dx = obj_center[0] - person_center_x
        dy = obj_center[1] - person_center_y

        return float(np.sqrt(dx*dx + dy*dy))

    def _detect_gaze_towards_object(
        self,
        obj_center: List[float],
        nose: Optional[np.ndarray],
        l_eye: Optional[np.ndarray],
        r_eye: Optional[np.ndarray]
    ) -> bool:
        """
        检测孩子的视线是否朝向指定物品

        参数:
            obj_center: 物品中心坐标
            nose: 鼻子关键点
            l_eye, r_eye: 左右眼关键点

        返回:
            bool: 是否在看这个物品
        """
        if not all([nose is not None, l_eye is not None, r_eye is not None]):
            return False

        # 计算眼睛中心点
        eye_center = self._get_midpoint(l_eye, r_eye)
        if eye_center is None:
            return False

        # 计算头部朝向向量（从眼睛中心到鼻子）
        head_direction = nose - eye_center

        # 计算从眼睛中心到物品的向量
        obj_vector = np.array(obj_center) - eye_center

        # 计算两个向量的夹角
        if np.linalg.norm(head_direction) > 0 and np.linalg.norm(obj_vector) > 0:
            cos_angle = np.dot(head_direction, obj_vector) / (
                np.linalg.norm(head_direction) * np.linalg.norm(obj_vector))
            angle_deg = np.degrees(np.arccos(np.clip(cos_angle, -1.0, 1.0)))

            # 如果角度小于30度，认为在看这个物品
            return angle_deg < 30

        return False

    def _detect_hand_object_interaction(
        self,
        obj_center: List[float],
        obj_bbox: List[float],
        l_wrist: Optional[np.ndarray],
        r_wrist: Optional[np.ndarray]
    ) -> float:
        """
        检测手部与物品的交互程度

        参数:
            obj_center: 物品中心坐标
            obj_bbox: 物品边界框 [x1, y1, x2, y2]
            l_wrist, r_wrist: 左右手腕关键点

        返回:
            float: 交互程度 (0.0-1.0)，0表示无交互，1表示强交互
        """
        interaction_level = 0.0

        # 检查左手交互
        if l_wrist is not None:
            left_interaction = self._calculate_hand_object_interaction_level(
                l_wrist, obj_center, obj_bbox)
            interaction_level = max(interaction_level, left_interaction)

        # 检查右手交互
        if r_wrist is not None:
            right_interaction = self._calculate_hand_object_interaction_level(
                r_wrist, obj_center, obj_bbox)
            interaction_level = max(interaction_level, right_interaction)

        return interaction_level

    def _calculate_hand_object_interaction_level(
        self,
        wrist: np.ndarray,
        obj_center: List[float],
        obj_bbox: List[float]
    ) -> float:
        """
        Score a single hand's interaction with one object.

        Parameters:
            wrist: wrist keypoint (x, y)
            obj_center: object center coordinates
            obj_bbox: object bounding box [x1, y1, x2, y2]

        Returns:
            float: interaction level in [0.0, 1.0]; 0 means no interaction.
        """
        distance = self._get_distance(wrist, np.array(obj_center))
        if distance is None:
            return 0.0

        # The hand must lie inside the object's box grown by a 50 px margin
        # (to account for the reach of a hand operating the object).
        margin = 50
        x1, y1, x2, y2 = obj_bbox
        hand_x, hand_y = wrist[0], wrist[1]
        inside_x = (x1 - margin) <= hand_x <= (x2 + margin)
        inside_y = (y1 - margin) <= hand_y <= (y2 + margin)
        if not (inside_x and inside_y):
            return 0.0

        # Closer hands score higher; beyond 100 px there is no interaction.
        max_interaction_distance = 100
        if distance > max_interaction_distance:
            return 0.0
        level = 1.0 - (distance / max_interaction_distance)
        return max(0.0, min(1.0, level))

    def _analyze_detected_objects(
        self,
        detected_objects: List[Dict[str, Any]],
        is_studying_focused: bool = False
    ) -> Dict[str, Any]:
        """
        Analyze detected objects in context: allowed vs forbidden, active use
        vs passive presence, and desktop clutter.

        Side effects: records usage start times in
        self.allowed_behavior_start_time, appends warning timestamps to
        self.forbidden_object_warnings, runs post-validation to drop likely
        misdetections, and cleans up expired records.

        Parameters:
            detected_objects: detected object list (with 'interaction_info'
                attached by the spatial filter, when available)
            is_studying_focused: whether the child currently appears focused

        Returns:
            Dict[str, Any]: analysis with allowed/forbidden objects, warnings,
            suggestions, clutter score, and active/passive item lists
        """
        analysis = {
            'allowed_objects': [],
            'forbidden_objects': [],
            'warnings': [],
            'suggestions': [],
            'desktop_clutter_score': 0.0,  # desktop tidiness penalty (0..1)
            'active_distractions': [],      # distraction items actively in use
            'passive_items': []             # items merely present on the desk
        }

        current_time = time.time()

        # Tallies feeding the desktop clutter score and suggestions.
        desktop_items_count = 0
        unused_items_count = 0

        for obj in detected_objects:
            class_name = obj['class_name']
            confidence = obj['confidence']
            interaction_info = obj.get('interaction_info', {})

            # Interaction signals produced by the spatial filter.
            is_actively_using = interaction_info.get('is_actively_using', False)
            is_looking_at = interaction_info.get('is_looking_at', False)
            hand_interaction_level = interaction_info.get('hand_interaction_level', 0.0)

            # Allowed or forbidden according to the behavior config.
            is_allowed, obj_info = self.behavior_config.is_object_allowed(class_name)

            if is_allowed:
                # Allowed object: track usage duration and tidiness impact.
                obj_data = {
                    'name': obj_info['name'],
                    'class_name': class_name,
                    'confidence': confidence,
                    'max_duration': obj_info.get('max_duration', 60),
                    'description': obj_info.get('description', ''),
                    'bbox': obj['bbox'],
                    'is_actively_using': is_actively_using,
                    'interaction_level': hand_interaction_level
                }

                analysis['allowed_objects'].append(obj_data)
                desktop_items_count += 1

                # No warning when the child is focused and not using the item.
                if is_studying_focused and not is_actively_using:
                    analysis['passive_items'].append(obj_data)
                    unused_items_count += 1
                    # Small penalty to encourage a tidy desk, without
                    # affecting the main score much.
                    analysis['desktop_clutter_score'] += 0.05
                elif is_actively_using:
                    # Item in use: check how long it has been used.
                    if class_name not in self.allowed_behavior_start_time:
                        self.allowed_behavior_start_time[class_name] = current_time
                        analysis['suggestions'].append(
                            f"正在使用{obj_info['name']}，{obj_info.get('description', '请适度使用')}")
                    else:
                        duration = current_time - self.allowed_behavior_start_time[class_name]
                        max_duration = obj_info.get('max_duration', 60)

                        if duration > max_duration:
                            analysis['warnings'].append(
                                f"{obj_info['name']}使用时间过长({duration:.0f}秒)，建议回到学习")
                else:
                    # Item present while the child is not focused.
                    unused_items_count += 1
                    analysis['desktop_clutter_score'] += 0.1

            else:
                # Forbidden object: severity depends on how it is being used.
                obj_data = {
                    'name': obj_info['name'],
                    'class_name': class_name,
                    'confidence': confidence,
                    'severity': obj_info.get('severity', 'medium'),
                    'description': obj_info.get('description', ''),
                    'bbox': obj['bbox'],
                    'is_actively_using': is_actively_using,
                    'interaction_level': hand_interaction_level
                }

                analysis['forbidden_objects'].append(obj_data)

                # Warning level depends on the usage signals.
                if is_actively_using or is_looking_at:
                    # Actively using a distraction item - strong warning.
                    analysis['active_distractions'].append(obj_data)
                    warning_msg = f"正在使用{obj_info['name']}！{obj_info.get('description', '请立即停止并专心学习')}"
                    analysis['warnings'].append(warning_msg)
                elif is_studying_focused:
                    # Passive presence while focused - gentle reminder.
                    analysis['passive_items'].append(obj_data)
                    analysis['suggestions'].append(f"建议收起{obj_info['name']}，保持桌面整洁")
                    analysis['desktop_clutter_score'] += 0.15
                else:
                    # Present while not focused - medium warning.
                    warning_msg = f"检测到{obj_info['name']}，{obj_info.get('description', '请收起并专心学习')}"
                    analysis['warnings'].append(warning_msg)

                # Record the warning timestamp for this object class.
                if class_name not in self.forbidden_object_warnings:
                    self.forbidden_object_warnings[class_name] = []
                self.forbidden_object_warnings[class_name].append(current_time)

        # Finalize the clutter score and generate tidiness suggestions.
        # NOTE(review): desktop_items_count counts only allowed objects, so
        # clutter from forbidden passive items alone skips this block.
        if desktop_items_count > 0:
            analysis['desktop_clutter_score'] = min(1.0, analysis['desktop_clutter_score'])

            if analysis['desktop_clutter_score'] > 0.3:
                analysis['suggestions'].append(
                    f"桌面有{unused_items_count}件未使用物品，建议整理桌面提高专注度")
            elif analysis['desktop_clutter_score'] > 0.1:
                analysis['suggestions'].append("保持桌面整洁有助于提高学习效率")

        # Post-validation to reduce false detections.
        analysis = self._validate_object_detection(analysis)

        # Drop expired behavior/warning records.
        self._cleanup_behavior_records(current_time)

        return analysis

    def _validate_object_detection(self, analysis: Dict[str, Any]) -> Dict[str, Any]:
        """
        验证物品检测结果，减少误检

        参数:
            analysis: 物品分析结果

        返回:
            Dict[str, Any]: 验证后的分析结果
        """
        # 获取所有检测到的物品
        all_objects = analysis.get('allowed_objects', []) + analysis.get('forbidden_objects', [])
        detected_classes = [obj['class_name'].lower() for obj in all_objects]

        # 验证规则1: 键盘通常应该配合鼠标或电脑出现
        keyboard_objects = [obj for obj in analysis.get('forbidden_objects', [])
                           if 'keyboard' in obj['class_name'].lower()]

        if keyboard_objects:
            has_computer_context = any(
                keyword in ' '.join(detected_classes)
                for keyword in ['mouse', 'computer', 'laptop', 'monitor', 'screen']
            )

            if not has_computer_context:
                # 没有电脑相关上下文，可能是误检，降低严重程度或移除
                for kb_obj in keyboard_objects:
                    if kb_obj['confidence'] < 0.7:  # 置信度不够高
                        analysis['forbidden_objects'].remove(kb_obj)
                        analysis['warnings'] = [w for w in analysis.get('warnings', [])
                                              if '键盘' not in w]
                        # 添加到建议而不是警告
                        analysis['suggestions'].append("检测到疑似键盘物品，请确认桌面整洁")

        # 验证规则2: 手机检测需要更严格验证
        phone_objects = [obj for obj in analysis.get('forbidden_objects', [])
                        if any(keyword in obj['class_name'].lower()
                              for keyword in ['phone', 'cell', 'mobile'])]

        for phone_obj in phone_objects:
            if phone_obj['confidence'] < 0.8:  # 手机需要很高置信度
                analysis['forbidden_objects'].remove(phone_obj)
                analysis['warnings'] = [w for w in analysis.get('warnings', [])
                                      if '手机' not in w]

        # 验证规则3: 学习场景下，书本不应该被误识别为电子设备
        book_related = any(keyword in ' '.join(detected_classes)
                          for keyword in ['book', 'notebook', 'paper'])

        if book_related:
            # 如果检测到书本相关物品，降低电子设备误检的可能性
            suspicious_objects = []
            for obj in analysis.get('forbidden_objects', []):
                if (any(keyword in obj['class_name'].lower()
                       for keyword in ['keyboard', 'tablet', 'laptop']) and
                    obj['confidence'] < 0.8):
                    suspicious_objects.append(obj)

            for sus_obj in suspicious_objects:
                analysis['forbidden_objects'].remove(sus_obj)
                analysis['warnings'] = [w for w in analysis.get('warnings', [])
                                      if sus_obj['name'] not in w]
                analysis['suggestions'].append(f"检测到疑似{sus_obj['name']}，可能是书本误识别")

        return analysis

    def _cleanup_behavior_records(self, current_time: float) -> None:
        """清理过期的行为记录"""
        # 清理允许行为记录（超过5分钟）
        expired_behaviors = []
        for behavior, start_time in self.allowed_behavior_start_time.items():
            if current_time - start_time > 300:  # 5分钟
                expired_behaviors.append(behavior)

        for behavior in expired_behaviors:
            del self.allowed_behavior_start_time[behavior]

        # 清理警告记录（保留最近1小时）
        for obj_name in list(self.forbidden_object_warnings.keys()):
            warnings = self.forbidden_object_warnings[obj_name]
            # 只保留最近1小时的警告
            recent_warnings = [w for w in warnings if current_time - w < 3600]
            if recent_warnings:
                self.forbidden_object_warnings[obj_name] = recent_warnings
            else:
                del self.forbidden_object_warnings[obj_name]

    def _detect_allowed_behaviors(self, features: Dict[str, Optional[float]]) -> List[Dict[str, Any]]:
        """
        检测允许的行为（如伸懒腰、喝水等）

        参数:
            features: 特征字典

        返回:
            List[Dict[str, Any]]: 检测到的允许行为列表
        """
        detected_behaviors = []
        current_time = time.time()

        # 检测伸懒腰行为
        if self._is_stretching(features):
            behavior_config = self.behavior_config.get_behavior_config(
                'stretch')
            if behavior_config:
                detected_behaviors.append({
                    'type': 'stretch',
                    'name': behavior_config['name'],
                    'description': behavior_config['description'],
                    'max_duration': behavior_config['max_duration'],
                    'timestamp': current_time
                })

        # 检测喝水行为
        if self._is_drinking_water(features):
            behavior_config = self.behavior_config.get_behavior_config(
                'drink_water')
            if behavior_config:
                detected_behaviors.append({
                    'type': 'drink_water',
                    'name': behavior_config['name'],
                    'description': behavior_config['description'],
                    'max_duration': behavior_config['max_duration'],
                    'timestamp': current_time
                })

        # 检测调整坐姿行为
        if self._is_adjusting_posture(features):
            behavior_config = self.behavior_config.get_behavior_config(
                'adjust_posture')
            if behavior_config:
                detected_behaviors.append({
                    'type': 'adjust_posture',
                    'name': behavior_config['name'],
                    'description': behavior_config['description'],
                    'max_duration': behavior_config['max_duration'],
                    'timestamp': current_time
                })

        return detected_behaviors

    def _is_stretching(self, features: Dict[str, Optional[float]]) -> bool:
        """
        检测是否在伸懒腰（优化版）

        使用多维度特征和时序验证，减少误判
        """
        # 获取关键特征
        head_pitch = features.get('head_pitch')
        hand_activity = features.get('hand_activity')
        hand_to_face_distance = features.get('hand_to_face_distance')

        # 伸懒腰的特征组合
        stretch_indicators = []

        # 1. 头部明显后仰（超过正常学习角度）
        if head_pitch is not None:
            if head_pitch > 45:  # 明显后仰，提高阈值
                stretch_indicators.append(1.0)
            elif head_pitch > 35:  # 中等后仰
                stretch_indicators.append(0.6)
            else:
                stretch_indicators.append(0.0)

        # 2. 手部活动但不是在脸部附近（排除喝水）
        if hand_activity is not None and hand_to_face_distance is not None:
            if hand_activity > 0.3 and hand_to_face_distance > 100:  # 手部活跃但远离脸部
                stretch_indicators.append(1.0)
            elif hand_activity > 0.1:
                stretch_indicators.append(0.3)
            else:
                stretch_indicators.append(0.0)

        # 3. 基于头部角度判断是否在正常学习状态
        if head_pitch is not None:
            if head_pitch < 5:  # 头部过于抬起，可能在伸懒腰
                stretch_indicators.append(0.8)
            elif head_pitch < 15:  # 头部稍微抬起
                stretch_indicators.append(0.4)
            else:
                stretch_indicators.append(0.0)  # 正常低头学习状态，不太可能伸懒腰

        # 4. 时序验证：检查行为持续性
        current_stretch_score = np.mean(stretch_indicators) if stretch_indicators else 0.0
        self.temporal_behavior_validator['stretch_sequence'].append(current_stretch_score)

        # 需要连续几帧都有伸懒腰特征才确认
        recent_scores = list(self.temporal_behavior_validator['stretch_sequence'])[-5:]
        if len(recent_scores) >= 3:
            avg_recent_score = np.mean(recent_scores)
            # 需要平均分数超过0.6且最近3帧都有一定分数
            return bool(avg_recent_score > 0.6 and all(score > 0.3 for score in recent_scores[-3:]))

        return False

    def _is_drinking_water(self, features: Dict[str, Optional[float]]) -> bool:
        """
        检测是否在喝水（优化版）

        结合手部位置、头部姿态和物品检测，提高准确性
        """
        # 获取关键特征
        head_pitch = features.get('head_pitch')
        hand_activity = features.get('hand_activity')
        hand_to_face_distance = features.get('hand_to_face_distance')

        # 喝水的特征组合
        drink_indicators = []

        # 1. 头部轻微后仰（喝水角度，不如伸懒腰那么大）
        if head_pitch is not None:
            if 15 <= head_pitch <= 35:  # 喝水的典型角度范围
                drink_indicators.append(1.0)
            elif 10 <= head_pitch <= 45:  # 可能的喝水角度
                drink_indicators.append(0.6)
            else:
                drink_indicators.append(0.0)

        # 2. 手部接近脸部（关键特征）
        if hand_to_face_distance is not None:
            if hand_to_face_distance < 50:  # 手很接近脸部
                drink_indicators.append(1.0)
            elif hand_to_face_distance < 80:  # 手比较接近脸部
                drink_indicators.append(0.7)
            elif hand_to_face_distance < 120:  # 手稍微接近脸部
                drink_indicators.append(0.3)
            else:
                drink_indicators.append(0.0)  # 手远离脸部，不太可能喝水

        # 3. 手部有适度活动
        if hand_activity is not None:
            if 0.1 <= hand_activity <= 0.8:  # 适度的手部活动
                drink_indicators.append(0.8)
            elif hand_activity > 0:
                drink_indicators.append(0.4)
            else:
                drink_indicators.append(0.0)

        # 4. 需要检测到水杯等相关物品（通过物品历史检查）
        has_drink_object = False
        current_time = time.time()
        for obj_name, start_time in self.allowed_behavior_start_time.items():
            if any(drink_word in obj_name.lower() for drink_word in ['bottle', 'cup', 'water']):
                if current_time - start_time < 60:  # 最近1分钟内检测到饮品
                    has_drink_object = True
                    break

        if has_drink_object:
            drink_indicators.append(1.0)
        else:
            drink_indicators.append(0.2)  # 没有饮品物品，降低喝水可能性

        # 5. 时序验证
        current_drink_score = np.mean(drink_indicators) if drink_indicators else 0.0
        self.temporal_behavior_validator['drink_sequence'].append(current_drink_score)

        # 喝水动作通常比较短暂，不需要太长的持续时间
        recent_scores = list(self.temporal_behavior_validator['drink_sequence'])[-3:]
        if len(recent_scores) >= 2:
            avg_recent_score = np.mean(recent_scores)
            # 需要平均分数超过0.7
            return bool(avg_recent_score > 0.7)

        return False

    def _is_adjusting_posture(self, features: Dict[str, Optional[float]]) -> bool:
        """
        检测是否在调整坐姿（优化版）

        基于身体姿态变化和稳定性指标
        """
        body_symmetry = features.get('body_symmetry', 1.0)
        sitting_posture = features.get('sitting_posture', 1.0)
        hand_activity = features.get('hand_activity', 0.0)

        # 调整坐姿的特征
        posture_indicators = []

        # 1. 身体对称性暂时较低
        if body_symmetry is not None and body_symmetry < 0.7:
            posture_indicators.append(0.8)

        # 2. 坐姿评分暂时较低
        if sitting_posture is not None and sitting_posture < 0.6:
            posture_indicators.append(0.8)

        # 3. 有轻微的手部或身体活动
        if hand_activity is not None and 0 < hand_activity < 0.5:
            posture_indicators.append(0.6)

        # 调整坐姿通常是短暂的，不需要严格的时序验证
        if posture_indicators:
            avg_score = np.mean(posture_indicators)
            return bool(avg_score > 0.6)

        return False
