#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
PoseGuard - 基于骨骼关键点检测的实时姿态评估系统
作者: 元歌
开发地: 广州南沙铁建中心
时间: 2024年10月
"""

import cv2
import sys
import argparse
import time
import numpy as np
from typing import Optional, Dict, List, Tuple
import os
import re

from pose_detector import PoseDetector
from pose_classifier import PoseClassifier
from behavior_detector import BehaviorDetector
from utils import resize_image, draw_chinese_text, ActionLogger, get_pose_color
from chinese_display import draw_chinese_text_enhanced, create_info_panel_chinese, load_chinese_font
from PIL import Image, ImageDraw
from typing import Optional as _Optional

# 可选的人脸识别
try:
    from face_recognizer import FaceRecognizer
except Exception:
    FaceRecognizer = None  # type: ignore

class PoseAssessmentApp:
    """姿态评估应用程序"""
    
    def __init__(self):
        """Initialize the application: detectors, classifiers, logging and tracking state."""
        self.detector = PoseDetector()
        self.classifier = PoseClassifier()
        self.behavior_detector = BehaviorDetector()
        # Pool of behavior detectors for multi-person mode (reused by person index).
        self.multi_behavior_detectors: List[BehaviorDetector] = []
        # OpenCV HOG-based person detector (no extra dependency required).
        self.hog = cv2.HOGDescriptor()
        self.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
        # Prefer YOLOv8 when ultralytics is importable.
        self.yolo_model = None
        try:
            from ultralytics import YOLO  # type: ignore
            yolo_model_path = 'yolov8n.pt'
            if os.path.exists(yolo_model_path):
                self.yolo_model = YOLO(yolo_model_path)
            else:
                # NOTE(review): same weights name either way — presumably ultralytics
                # downloads the file when it is missing locally; confirm.
                self.yolo_model = YOLO('yolov8n.pt')
            try:
                # Lower confidence to raise recall; later pose filtering prunes false hits.
                self.yolo_model.overrides['conf'] = 0.35
            except Exception:
                pass
            print("多人检测: YOLOv8 已启用")
        except Exception as e:
            self.yolo_model = None
            print("多人检测: YOLOv8 不可用，使用HOG回退。", e)
        self.action_logger = ActionLogger()
        self.log_history = []
        self.last_state = None
        self.behavior_summary_timer = time.time()
        # Off-post timing (no keypoints detected).
        self.no_keypoints_start_time: Optional[float] = None
        self.off_post_active: bool = False
        self.off_post_threshold_sec: float = 5.0
        self.current_off_post_duration_sec: float = 0.0
        self.off_post_accumulated_sec: float = 0.0
        # Top-left header text (title only).
        self.header_title: str = "润生软件姿态识别2D版 1.0"
        # Optional face recognition.
        try:
            self.face_recognizer: _Optional[FaceRecognizer] = FaceRecognizer() if FaceRecognizer is not None else None
        except Exception:
            self.face_recognizer = None
        # Binding management: after a successful face recognition, attach the name to the
        # nearest target box so the name is shown in preference to a numeric ID afterwards.
        # Each entry: { 'bbox': (x,y,w,h), 'name': str, 'last_seen': float, 'last_verified': float }
        self.bound_tracks: List[Dict[str, object]] = []
        self.bound_track_max_age_sec: float = 3.0
        self.bound_track_ttl_sec: float = 3600.0  # bindings stay valid for 1 hour
        # Single-person mode binding.
        self.single_bound_name: _Optional[str] = None
        self.single_bound_last_verified: _Optional[float] = None
        
    def process_image(self, image):
        """
        Process a single frame: detect people, classify pose/behavior, annotate.

        Args:
            image: input BGR image.

        Returns:
            tuple: (annotated image, info dict). The dict carries 'pose' and
            'behavior' (and 'people' in multi-person mode); it is empty when
            nothing usable was detected.
        """
        # Normalise the frame size first.
        resized_image = resize_image(image)
        
        # Try multi-person detection first.
        person_boxes = self._detect_people_boxes(resized_image)
        detected_any_keypoints = False

        if len(person_boxes) > 0:
            # Grow the behavior-detector pool to cover every detected person.
            if len(self.multi_behavior_detectors) < len(person_boxes):
                for _ in range(len(person_boxes) - len(self.multi_behavior_detectors)):
                    self.multi_behavior_detectors.append(BehaviorDetector())

            people_results = []
            annotated_image = resized_image.copy()

            for idx, (x, y, w, h) in enumerate(person_boxes):
                # Clip the box to the image bounds.
                x0 = max(0, x)
                y0 = max(0, y)
                x1 = min(annotated_image.shape[1], x + w)
                y1 = min(annotated_image.shape[0], y + h)
                if x1 <= x0 or y1 <= y0:
                    continue

                roi = annotated_image[y0:y1, x0:x1]
                landmarks, annotated_roi = self.detector.detect_pose(roi)

                if landmarks is not None:
                    detected_any_keypoints = True
                    # Paste the keypoint-annotated ROI back into the frame.
                    try:
                        annotated_image[y0:y1, x0:x1] = annotated_roi
                    except Exception:
                        pass

                    # Pose and behavior classification.
                    pose_result = self.classifier.classify_pose(landmarks)
                    behavior_result = self.multi_behavior_detectors[idx].add_pose(landmarks, pose_result)

                    # Record the result.
                    # Prepare the display label first: a previously bound name wins;
                    # otherwise the numeric ID. If face recognition succeeds right
                    # now, bind the name and use it.
                    bbox_curr = (x0, y0, x1 - x0, y1 - y0)
                    display_label = self._get_bound_name_for_bbox(bbox_curr) or f"ID {idx}"
                    try:
                        if self.face_recognizer is not None:
                            rec = self.face_recognizer.recognize_face_from_roi(roi)
                            if isinstance(rec, tuple) and len(rec) >= 1 and isinstance(rec[0], str):
                                name = rec[0].strip()
                                if name:
                                    display_label = name
                                    self._bind_bbox_with_name(bbox_curr, name, verified=True)
                    except Exception:
                        pass

                    people_results.append({
                        'id': idx,
                        'bbox': bbox_curr,
                        'pose': pose_result,
                        'behavior': behavior_result,
                        'display_label': display_label,
                    })

                    # On-frame annotation: one compact info line per person (transparent background).
                    person_pose_cn = self._get_pose_chinese_name(pose_result.get('pose', 'unknown'))
                    person_pose_conf = float(pose_result.get('confidence', 0.0))
                    person_behavior_cn = behavior_result.get('chinese_name', '未知') if behavior_result else '未知'
                    person_behavior_conf = float(behavior_result.get('confidence', 0.0)) if behavior_result else 0.0

                    info_text = f"{display_label} 姿态: {person_pose_cn} ({person_pose_conf:.0%}) | 行为: {person_behavior_cn} ({person_behavior_conf:.0%})"
                    text_x = x0 + 5
                    text_y = max(12, y0 + 12)
                    annotated_image = draw_chinese_text_enhanced(
                        annotated_image,
                        info_text,
                        (text_x, text_y),
                        font_size=12,
                        color=(255, 255, 255),
                        background_color=None,
                        background_padding=0,
                    )
                    # Draw a decorated bounding box (color follows the pose).
                    try:
                        pose_key = pose_result.get('pose', 'unknown') if isinstance(pose_result, dict) else 'unknown'
                        color = get_pose_color(pose_key)
                    except Exception:
                        color = (128, 128, 128)
                    self._draw_fancy_bbox_inplace(annotated_image, (x0, y0, x1 - x0, y1 - y0), color)

                    # Logging and periodic summary (the first person drives the global log).
                    if idx == 0:
                        self._update_log_history(pose_result, behavior_result)
                        self._output_status_summary(pose_result, behavior_result)

                else:
                    # No keypoints found; still draw the decorated box and the ID/name.
                    display_label = self._get_bound_name_for_bbox((x0, y0, x1 - x0, y1 - y0)) or f"ID {idx}"
                    try:
                        color = (128, 128, 128)
                        self._draw_fancy_bbox_inplace(annotated_image, (x0, y0, x1 - x0, y1 - y0), color)
                        # One short line at the top (ID/name only).
                        info_text = f"{display_label}"
                        annotated_image = draw_chinese_text_enhanced(
                            annotated_image,
                            info_text,
                            (x0 + 5, max(12, y0 + 12)),
                            font_size=12,
                            color=(255, 255, 255),
                            background_color=None,
                            background_padding=0,
                        )
                    except Exception:
                        pass
            # Single summary line at the top-left (transparent background) — the
            # first person's result serves as the global quick info.
            if detected_any_keypoints:
                # Update off-post timing (keypoints were detected).
                self._update_off_post(detected=True)
                self._cleanup_bound_tracks()
                first = people_results[0]
                result_image = self._draw_info(
                    annotated_image,
                    first.get('pose'),
                    first.get('behavior'),
                    display_label=first.get('display_label', f"ID {first.get('id', 0)}")
                )
                return result_image, {
                    'pose': first.get('pose'),
                    'behavior': first.get('behavior'),
                    'people': people_results,
                }
            else:
                # No usable per-person detail.
                # Update off-post timing (no keypoints detected).
                self._update_off_post(detected=False)
                result_image = self._draw_info(resized_image, None, None)
                return result_image, {}

        # Otherwise fall back to single-person mode.
        landmarks, annotated_image = self.detector.detect_pose(resized_image)
        if landmarks is not None:
            self._update_off_post(detected=True)
            pose_result = self.classifier.classify_pose(landmarks)
            behavior_result = self.behavior_detector.add_pose(landmarks, pose_result)
            # Single-person mode: run face recognition over the whole frame.
            # First expire the single-person binding once past its TTL.
            if self.single_bound_name and self.single_bound_last_verified is not None:
                if (time.time() - self.single_bound_last_verified) > self.bound_track_ttl_sec:
                    self.single_bound_name = None
                    self.single_bound_last_verified = None
            display_label_single = self.single_bound_name or "ID 0"
            try:
                if self.face_recognizer is not None:
                    rec = self.face_recognizer.recognize_in_bgr(resized_image)
                    if isinstance(rec, tuple) and len(rec) >= 1 and isinstance(rec[0], str) and rec[0].strip():
                        self.single_bound_name = rec[0].strip()
                        self.single_bound_last_verified = time.time()
                        display_label_single = self.single_bound_name
            except Exception:
                pass
            result_image = self._draw_info(annotated_image, pose_result, behavior_result, display_label=display_label_single)
            self._update_log_history(pose_result, behavior_result)
            self._output_status_summary(pose_result, behavior_result)
            return result_image, {'pose': pose_result, 'behavior': behavior_result}
        else:
            self._update_off_post(detected=False)
            # Single-person mode: also expire a stale binding when nothing is
            # recognised in this frame.
            if self.single_bound_name and self.single_bound_last_verified is not None:
                if (time.time() - self.single_bound_last_verified) > self.bound_track_ttl_sec:
                    self.single_bound_name = None
                    self.single_bound_last_verified = None
            result_image = self._draw_info(resized_image, None, None)
            return result_image, {}

    def _detect_people_boxes(self, image: np.ndarray) -> List[Tuple[int, int, int, int]]:
        """Detect person bounding boxes: YOLO first, HOG as fallback.

        Returns:
            List of (x, y, w, h) boxes — area-descending for YOLO, detector
            order for HOG, empty list when both paths fail.
        """
        # Try YOLOv8.
        if self.yolo_model is not None:
            try:
                results = self.yolo_model(image)
                boxes: List[Tuple[int, int, int, int]] = []
                for res in results:
                    if getattr(res, 'boxes', None) is None:
                        continue
                    for box in res.boxes:
                        try:
                            cls_id = int(box.cls[0].cpu().numpy()) if hasattr(box, 'cls') else -1
                            if cls_id != 0:
                                continue  # keep only class 0 (person)
                            x1, y1, x2, y2 = box.xyxy[0].cpu().numpy()
                            x, y, w, h = int(x1), int(y1), int(x2 - x1), int(y2 - y1)
                            # Drop implausibly small detections.
                            if w >= 20 and h >= 40:
                                boxes.append((x, y, w, h))
                        except Exception:
                            continue
                # Largest boxes first (area descending).
                boxes.sort(key=lambda b: b[2] * b[3], reverse=True)
                if boxes:
                    return boxes
            except Exception as e:
                print("YOLO检测失败，回退到HOG:", e)

        # HOG fallback.
        try:
            rects, weights = self.hog.detectMultiScale(
                image,
                winStride=(8, 8),
                padding=(16, 16),
                scale=1.05,
                hitThreshold=0.0,
                useMeanshiftGrouping=False,
            )
            return [(int(x), int(y), int(w), int(h)) for (x, y, w, h) in rects]
        except Exception:
            return []
    
    def _draw_info(self, image, pose_result, behavior_result, display_label: str = "ID 0"):
        """Draw the HUD: bottom status bar, header title, FPS and off-post timers.

        Args:
            image: BGR frame to annotate (a new array may be returned).
            pose_result: pose classification dict or None.
            behavior_result: behavior detection dict or None.
            display_label: name or ID shown for the tracked person.

        Returns:
            The annotated image.
        """
        h, w = image.shape[:2]

        # Bug fix: removed an `info_dict` local that was built here but never used.

        # Compact single-line status in preference to a large panel.
        pose_name_cn = '未识别'
        pose_conf = 0.0
        if pose_result:
            pose_name_cn = self._get_pose_chinese_name(pose_result.get('pose', 'unknown'))
            pose_conf = float(pose_result.get('confidence', 0.0))

        behavior_name_cn = '未知'
        behavior_conf = 0.0
        if behavior_result:
            behavior_name_cn = behavior_result.get('chinese_name', '未知')
            behavior_conf = float(behavior_result.get('confidence', 0.0))

        compact_text = f"检测用户{display_label}  姿态: {pose_name_cn} ({pose_conf:.0%}) | 行为: {behavior_name_cn} ({behavior_conf:.0%})"
        # Small translucent bar at the bottom; text vertically centred so it is not clipped.
        try:
            overlay = image.copy()
            x1, y1 = 5, h - 50
            x2, y2 = min(w - 5, 5 + 800), h - 5
            cv2.rectangle(overlay, (x1, y1), (x2, y2), (0, 0, 0), -1)
            cv2.addWeighted(overlay, 0.4, image, 0.6, 0, image)

            # Measure the text height so it can be vertically centred.
            try:
                font = load_chinese_font(16)
                # Use a throwaway canvas for measurement.
                tmp_img = Image.new('RGB', (10, 10))
                tmp_draw = ImageDraw.Draw(tmp_img)
                bbox = tmp_draw.textbbox((0, 0), compact_text, font=font)
                text_height = max(0, bbox[3] - bbox[1])
            except Exception:
                text_height = 20

            rect_height = max(0, y2 - y1)
            text_y = y1 + max(2, (rect_height - text_height) // 2)
            text_x = x1 + 10

            # Draw the whole line in red first.
            image = draw_chinese_text_enhanced(
                image,
                compact_text,
                (text_x, text_y),
                font_size=16,
                color=(0, 0, 255),  # BGR: red
                background_color=None,
                background_padding=0,
            )

            # Overdraw numeric fragments (percentages or bare digits such as "ID 0") in white.
            try:
                image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                pil_image = Image.fromarray(image_rgb)
                draw = ImageDraw.Draw(pil_image)
                font = load_chinese_font(16)

                number_pattern = re.compile(r"[0-9]+(?:\.[0-9]+)?%?")
                for m in number_pattern.finditer(compact_text):
                    prefix = compact_text[:m.start()]
                    bbox_pre = draw.textbbox((0, 0), prefix, font=font)
                    x_offset = bbox_pre[2] - bbox_pre[0]
                    # Repaint the numeric fragment in white.
                    draw.text((text_x + x_offset, text_y), m.group(0), font=font, fill=(255, 255, 255))

                image = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
            except Exception:
                pass
        except Exception:
            # Fallback: fixed position (nudged up to avoid clipping), red base with white numbers.
            text_x_fallback, text_y_fallback = 10, max(5, h - 28)
            image = draw_chinese_text_enhanced(
                image,
                compact_text,
                (text_x_fallback, text_y_fallback),
                font_size=16,
                color=(0, 0, 255),
                background_color=None,
                background_padding=0,
            )
            try:
                image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                pil_image = Image.fromarray(image_rgb)
                draw = ImageDraw.Draw(pil_image)
                font = load_chinese_font(16)
                number_pattern = re.compile(r"[0-9]+(?:\.[0-9]+)?%?")
                for m in number_pattern.finditer(compact_text):
                    prefix = compact_text[:m.start()]
                    bbox_pre = draw.textbbox((0, 0), prefix, font=font)
                    x_offset = bbox_pre[2] - bbox_pre[0]
                    draw.text((text_x_fallback + x_offset, text_y_fallback), m.group(0), font=font, fill=(255, 255, 255))
                image = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
            except Exception:
                pass

        # Top-left: system title.
        try:
            image = draw_chinese_text_enhanced(
                image, self.header_title, (10, 10),
                font_size=14, color=(255, 255, 255),
                background_color=(0, 0, 0), background_padding=3
            )
        except Exception:
            pass

        # System info (Chinese): frame rate.
        fps_text = f"帧率: {self._calculate_fps():.1f} FPS"
        image = draw_chinese_text_enhanced(image, fps_text, (w - 200, 30), 
                                        font_size=14, color=(0, 255, 0),
                                        background_color=(0, 0, 0), background_padding=3)

        # Off-post timers (shown only when there is something to report).
        off_text_lines = self._format_off_post_text()
        if off_text_lines:
            # Left-aligned with the FPS text.
            y_base = 55
            for line in off_text_lines:
                image = draw_chinese_text_enhanced(
                    image, line, (w - 200, y_base),
                    font_size=12, color=(0, 255, 255),
                    background_color=(0, 0, 0), background_padding=2
                )
                y_base += 20
        
        return image

    def _draw_fancy_bbox_inplace(self, image: np.ndarray, bbox: Tuple[int,int,int,int], color: Tuple[int,int,int]) -> None:
        """Draw a decorated bounding box: translucent fill plus highlighted corner ticks.

        Args:
            image: BGR image, modified in place.
            bbox: (x, y, w, h) box in pixel coordinates.
            color: BGR color tuple.
        """
        try:
            box_x, box_y, box_w, box_h = bbox
            left, top = box_x, box_y
            right, bottom = box_x + box_w, box_y + box_h
            img_h, img_w = image.shape[:2]
            # Ignore boxes that lie entirely outside the frame.
            if left >= img_w or top >= img_h or right <= 0 or bottom <= 0:
                return
            # Translucent fill blended onto the frame.
            shade = image.copy()
            cv2.rectangle(shade, (left, top), (right, bottom), color, -1)
            cv2.addWeighted(shade, 0.15, image, 0.85, 0, image)
            # Solid outline.
            cv2.rectangle(image, (left, top), (right, bottom), color, 2)
            # Corner tick length scales with box size, clamped to [8, 20].
            tick = max(8, min(20, min(box_w, box_h)//6))
            thickness = 2
            # One horizontal and one vertical tick per corner; the signs pick
            # which way the ticks point into the box.
            corner_specs = (
                ((left, top), 1, 1),        # top-left
                ((right, top), -1, 1),      # top-right
                ((left, bottom), 1, -1),    # bottom-left
                ((right, bottom), -1, -1),  # bottom-right
            )
            for (cx, cy), sx, sy in corner_specs:
                cv2.line(image, (cx, cy), (cx + sx * tick, cy), color, thickness)
                cv2.line(image, (cx, cy), (cx, cy + sy * tick), color, thickness)
        except Exception:
            pass

    # ---------------- 绑定与匹配辅助 ----------------
    def _bbox_iou(self, b1: Tuple[int,int,int,int], b2: Tuple[int,int,int,int]) -> float:
        """Return the intersection-over-union of two (x, y, w, h) boxes (0.0 when disjoint or degenerate)."""
        ax, ay, aw, ah = b1
        bx, by, bw, bh = b2
        # Overlap extents, clamped to zero when the boxes do not intersect.
        overlap_w = max(0, min(ax + aw, bx + bw) - max(ax, bx))
        overlap_h = max(0, min(ay + ah, by + bh) - max(ay, by))
        overlap = overlap_w * overlap_h
        union = aw * ah + bw * bh - overlap
        return overlap / union if union > 0 else 0.0

    def _bbox_center_distance(self, b1: Tuple[int,int,int,int], b2: Tuple[int,int,int,int]) -> float:
        """Return the Euclidean distance between the centers of two (x, y, w, h) boxes."""
        ax, ay, aw, ah = b1
        bx, by, bw, bh = b2
        dx = (ax + aw * 0.5) - (bx + bw * 0.5)
        dy = (ay + ah * 0.5) - (by + bh * 0.5)
        return float((dx * dx + dy * dy) ** 0.5)

    def _get_bound_name_for_bbox(self, bbox: Tuple[int,int,int,int]) -> _Optional[str]:
        if not self.bound_tracks:
            return None
        now = time.time()
        best_idx = -1
        best_score = -1.0
        for i, tr in enumerate(self.bound_tracks):
            tr_bbox = tr.get('bbox')  # type: ignore
            if not isinstance(tr_bbox, tuple):
                continue
            iou = self._bbox_iou(bbox, tr_bbox)  # type: ignore
            # 先按IoU匹配
            if iou > best_score:
                best_score = iou
                best_idx = i
        name: _Optional[str] = None
        if best_idx >= 0 and best_score >= 0.3:
            tr = self.bound_tracks[best_idx]
            # TTL 检查
            last_verified = float(tr.get('last_verified', 0.0)) if isinstance(tr.get('last_verified'), (int, float)) else 0.0  # type: ignore
            if (now - last_verified) <= self.bound_track_ttl_sec:
                name = tr.get('name') if isinstance(tr.get('name'), str) else None  # type: ignore
            else:
                name = None
            # 更新轨迹
            tr['bbox'] = bbox  # type: ignore
            tr['last_seen'] = now  # type: ignore
        else:
            # 如果IoU不够，考虑中心点距离最近的一个
            best_idx = -1
            best_dist = None
            for i, tr in enumerate(self.bound_tracks):
                tr_bbox = tr.get('bbox')  # type: ignore
                if not isinstance(tr_bbox, tuple):
                    continue
                dist = self._bbox_center_distance(bbox, tr_bbox)  # type: ignore
                if best_dist is None or dist < best_dist:
                    best_dist = dist
                    best_idx = i
            if best_idx >= 0 and (best_dist is not None) and best_dist <= 80.0:
                tr = self.bound_tracks[best_idx]
                last_verified = float(tr.get('last_verified', 0.0)) if isinstance(tr.get('last_verified'), (int, float)) else 0.0  # type: ignore
                if (now - last_verified) <= self.bound_track_ttl_sec:
                    name = tr.get('name') if isinstance(tr.get('name'), str) else None  # type: ignore
                else:
                    name = None
                tr['bbox'] = bbox  # type: ignore
                tr['last_seen'] = now  # type: ignore
        return name

    def _bind_bbox_with_name(self, bbox: Tuple[int,int,int,int], name: str, verified: bool = True) -> None:
        """Associate *name* with *bbox*, refreshing an existing same-name track if present."""
        now = time.time()
        # Refresh a track already carrying this name, if any.
        existing = next((tr for tr in self.bound_tracks if tr.get('name') == name), None)
        if existing is not None:
            existing['bbox'] = bbox
            existing['last_seen'] = now
            if verified:
                existing['last_verified'] = now
            return
        # No track with this name yet: create one.
        fresh: Dict[str, object] = {
            'bbox': bbox,
            'name': name,
            'last_seen': now,
            'last_verified': now if verified else 0.0,
        }
        self.bound_tracks.append(fresh)

    def _cleanup_bound_tracks(self) -> None:
        now = time.time()
        self.bound_tracks = [tr for tr in self.bound_tracks if (now - float(tr.get('last_seen', 0.0))) <= self.bound_track_max_age_sec]

    # 供外部调用：清空所有绑定（例如删除人员或训练完成后）
    # External hook: wipe every binding (e.g. after deleting a person or finishing training).
    def clear_bindings(self) -> None:
        """Forget all multi-person tracks and the single-person binding."""
        self.bound_tracks = []
        self.single_bound_name = None
        self.single_bound_last_verified = None

    def _update_off_post(self, detected: bool):
        """更新离岗状态计时：当未检测到任何关键点时在阈值后开始计时。"""
        now = time.time()
        if detected:
            # 从离岗状态恢复
            if self.off_post_active:
                # 累计当前离岗时长
                self.off_post_accumulated_sec += self.current_off_post_duration_sec
                try:
                    print(f"离岗结束，本次时长: {self._format_seconds(self.current_off_post_duration_sec)}，累计: {self._format_seconds(self.off_post_accumulated_sec)}")
                except Exception:
                    pass
            # 重置状态
            self.no_keypoints_start_time = None
            self.current_off_post_duration_sec = 0.0
            self.off_post_active = False
            return

        # 未检测到关键点
        if self.no_keypoints_start_time is None:
            self.no_keypoints_start_time = now
            self.current_off_post_duration_sec = 0.0
            self.off_post_active = False
            return

        elapsed = now - self.no_keypoints_start_time
        if not self.off_post_active:
            if elapsed >= self.off_post_threshold_sec:
                self.off_post_active = True
                self.current_off_post_duration_sec = elapsed - self.off_post_threshold_sec
                try:
                    print(f"离岗开始（超过{int(self.off_post_threshold_sec)}秒无关键点）")
                except Exception:
                    pass
        else:
            self.current_off_post_duration_sec = max(0.0, elapsed - self.off_post_threshold_sec)

    def _format_off_post_text(self) -> List[str]:
        """Build the off-post timer lines for the HUD (empty when there is nothing to show)."""
        current = self.current_off_post_duration_sec if self.off_post_active else 0.0
        total = self.off_post_accumulated_sec + current
        if not self.off_post_active and total <= 0.0:
            return []
        lines = []
        if self.off_post_active:
            lines.append(f"离岗(当前): {self._format_seconds(self.current_off_post_duration_sec)}")
        lines.append(f"离岗(累计): {self._format_seconds(total)}")
        return lines

    def _format_seconds(self, seconds: float) -> str:
        """Render a non-negative duration as MM:SS, or HH:MM:SS once it reaches an hour."""
        total = int(max(0, round(seconds)))
        minutes, s = divmod(total, 60)
        h, m = divmod(minutes, 60)
        if h > 0:
            return f"{h:02d}:{m:02d}:{s:02d}"
        return f"{m:02d}:{s:02d}"
    
    def _calculate_fps(self):
        """Estimate frames per second from calls to this method (window resets roughly every second)."""
        now = time.time()
        # Lazily initialise the counters on the very first call.
        if not hasattr(self, '_fps_time'):
            self._fps_time = now
            self._fps_count = 0
            return 0.0

        self._fps_count += 1
        window = now - self._fps_time
        if window >= 1.0:
            # A full window elapsed: report its average and start a new one.
            fps = self._fps_count / window
            self._fps_time = now
            self._fps_count = 0
            return fps
        # Mid-window: return the running estimate so far.
        return self._fps_count / max(1e-6, window)
    
    def _get_pose_chinese_name(self, pose):
        """Map a pose key to its Chinese display name ('未知' for unrecognised keys)."""
        return {
            'standing': '站立',
            'sitting': '坐下',
            'lying_down': '躺下',
            'squatting': '蹲下',
            'walking': '步行',
            'unknown': '未识别',
        }.get(pose, '未知')
    
    def _get_action_chinese_name(self, action):
        """Map an action key to its Chinese display name ('未知' for unrecognised keys)."""
        return {
            'stationary': '静止',
            'moving': '移动',
            'walking': '步行',
            'waving': '挥手',
            'raising_hand': '举手',
            'unknown': '未知',
        }.get(action, '未知')
    
    def _update_log_history(self, pose_result, behavior_result):
        """Append a log entry whenever the combined (pose, behavior) state changes.

        Keeps at most 10 entries in ``self.log_history`` and mirrors each state
        change to ``self.action_logger`` on a best-effort basis.

        Args:
            pose_result: dict with 'pose' and 'confidence' keys (or falsy).
            behavior_result: dict with 'chinese_name'/'behavior'/'confidence' keys (or falsy).
        """
        # Bug fix: removed an unused `current_time = time.strftime(...)` computed here.
        if pose_result and behavior_result:
            pose_name = self._get_pose_chinese_name(pose_result.get('pose', 'unknown'))
            pose_conf = pose_result.get('confidence', 0.0)

            behavior_name = behavior_result.get('chinese_name', '未知')
            behavior_conf = behavior_result.get('confidence', 0.0)

            # Single-line log format.
            log_entry = f"检测用户ID 0 目前姿态为{pose_name}，可信度: {pose_conf:.0%}   可能正在{behavior_name}  可信度{behavior_conf:.0%}"

            # Only record when the combined state actually changed.
            current_state = f"{pose_name}_{behavior_name}"
            if current_state != self.last_state:
                self.log_history.append(log_entry)
                self.last_state = current_state

                # Cap the in-memory history at 10 entries.
                if len(self.log_history) > 10:
                    self.log_history.pop(0)

                # Announce the state change.
                print(f"状态变化: {pose_name} -> {behavior_name}")

                # Persist the state change to the action log file (best effort).
                try:
                    self.action_logger.log(
                        person_id=0,
                        pose=pose_result.get('pose', 'unknown'),
                        pose_conf=float(pose_result.get('confidence', 0.0)),
                        behavior=behavior_result.get('behavior', 'unknown'),
                        behavior_conf=float(behavior_result.get('confidence', 0.0))
                    )
                except Exception:
                    pass
    
    def _output_status_summary(self, pose_result, behavior_result):
        """定期输出状态摘要"""
        current_time = time.time()
        
        # 每5秒输出一次状态摘要
        if current_time - self.behavior_summary_timer >= 5.0:
            if pose_result and behavior_result:
                pose_name = self._get_pose_chinese_name(pose_result.get('pose', 'unknown'))
                pose_conf = pose_result.get('confidence', 0.0)
                behavior_name = behavior_result.get('chinese_name', '未知')
                behavior_conf = behavior_result.get('confidence', 0.0)
                
                print("="*50)
                print(f"状态摘要 [{time.strftime('%H:%M:%S')}]:")
                print(f"  当前姿态: {pose_name} (置信度: {pose_conf:.1%})")
                print(f"  当前行为: {behavior_name} (置信度: {behavior_conf:.1%})")
                print("="*50)

                # 每5秒也记录一条周期性日志（便于持续留痕）
                try:
                    self.action_logger.log(
                        person_id=0,
                        pose=pose_result.get('pose', 'unknown'),
                        pose_conf=float(pose_result.get('confidence', 0.0)),
                        behavior=behavior_result.get('behavior', 'unknown'),
                        behavior_conf=float(behavior_result.get('confidence', 0.0))
                    )
                except Exception:
                    pass
            
            self.behavior_summary_timer = current_time
    
    def run_camera(self, camera_id: int = 0):
        """
        Run real-time detection from a camera.

        Args:
            camera_id: index of the camera device to open (default 0).

        Side effects: opens an OpenCV window; press 'q' to quit, 's' to save
        the current annotated frame as a timestamped JPEG.
        """
        print("启动摄像头实时检测...")
        print("按 'q' 键退出，按 's' 键保存当前帧")
        print("底部将显示状态日志：检测用户ID 0 目前姿态为...")
        
        # Open the camera.
        cap = cv2.VideoCapture(camera_id)
        
        if not cap.isOpened():
            print("无法打开摄像头")
            return
        
        # Configure capture parameters (best effort; drivers may ignore them).
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        cap.set(cv2.CAP_PROP_FPS, 30)
        
        fps_counter = 0
        start_time = time.time()
        
        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    print("无法读取摄像头画面")
                    break
                
                # Run the full detection/annotation pipeline on this frame.
                result_image, pose_info = self.process_image(frame)
                
                # Report FPS and current state once every 30 frames.
                fps_counter += 1
                if fps_counter % 30 == 0:
                    elapsed_time = time.time() - start_time
                    fps = fps_counter / elapsed_time
                    pose_name = pose_info.get('pose', {}).get('pose', '未识别')
                    pose_conf = pose_info.get('pose', {}).get('confidence', 0.0)
                    
                    # Behavior details for the console line.
                    behavior_info = pose_info.get('behavior', {})
                    behavior_name = behavior_info.get('chinese_name', '未知')
                    behavior_conf = behavior_info.get('confidence', 0.0)
                    
                    print(f"FPS: {fps:.1f} - 姿态: {self._get_pose_chinese_name(pose_name)} ({pose_conf:.1%}) - 行为: {behavior_name} ({behavior_conf:.1%})")
                
                # Show the annotated frame.
                cv2.imshow('PoseGuard - 姿态评估系统', result_image)
                
                # Handle keyboard input.
                key = cv2.waitKey(1) & 0xFF
                if key == ord('q'):
                    break
                elif key == ord('s'):
                    # Save the current frame.
                    timestamp = time.strftime("%Y%m%d_%H%M%S")
                    filename = f"pose_snapshot_{timestamp}.jpg"
                    cv2.imwrite(filename, result_image)
                    # Bug fix: the message previously printed a literal placeholder
                    # instead of the saved file's name.
                    print(f"已保存截图: {filename}")
        
        except KeyboardInterrupt:
            print("\n程序被用户中断")
        except Exception as e:
            print(f"程序运行出错: {e}")
        finally:
            cap.release()
            cv2.destroyAllWindows()
            print("程序已退出")

def main():
    """Entry point: parse CLI arguments, print the startup banner, launch camera detection."""
    parser = argparse.ArgumentParser(description='PoseGuard - 姿态评估系统')
    parser.add_argument('--camera', type=int, default=0, help='摄像头ID (默认: 0)')
    parser.add_argument('--mode', type=str, default='camera', choices=['camera'], help='运行模式')
    args = parser.parse_args()

    separator = "=" * 40
    print("PoseGuard姿态评估系统")
    print(separator)
    print("检测架构: 增强版检测")
    print("检测流程: 姿态检测 → 姿态分类 → 行为识别")
    print("支持的姿势: 站立、坐下、躺下、蹲下")
    print("支持的行为: 举手(左/右/双)、伸手(左/右/双/平伸)、吃东西、走路、转身、聊天、工作、睡觉、跳跃/双膝上抬")
    print(separator)
    print("按 'q' 键退出，按 's' 键保存当前帧")
    print("系统将实时显示姿态和行为信息")
    print(separator)

    # Build the application and enter the camera loop.
    app = PoseAssessmentApp()
    app.run_camera(args.camera)

if __name__ == '__main__':
    main()