#!/usr/bin/env python3
"""
儿童学习注意力检测演示脚本

这是一个简单的演示脚本，展示如何使用 AttentionDetector 进行实时注意力检测。
适合快速测试和验证功能。

运行方式：
    python examples/attention_detection_demo.py

依赖要求：
    - OpenCV (cv2)
    - NumPy
    - Ultralytics YOLO
    - 预训练的 YOLOv11 姿态估计模型
"""

import cv2
import time
import sys
import os
import numpy as np
from PIL import Image, ImageDraw, ImageFont

# Add the project root to sys.path (must run before importing kidsbuddy modules)
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

# Configure the macOS environment (suppress system warnings)
try:
    from kidsbuddy.utils.macos_config import configure_macos_environment, configure_for_opencv_gui
    configure_macos_environment(verbose=True)
    configure_for_opencv_gui()
except ImportError:
    # If the config module cannot be imported, fall back to basic warning suppression
    import warnings
    import logging

    # Suppress macOS system-related warnings
    warnings.filterwarnings("ignore", message=".*IMKCFRunLoopWakeUpReliable.*")
    warnings.filterwarnings("ignore", message=".*mach port.*")
    warnings.filterwarnings("ignore", message=".*messaging the mach port.*")
    warnings.filterwarnings("ignore", category=UserWarning)

    # Set environment variables (skip camera auth prompt, silence warnings)
    os.environ['OPENCV_AVFOUNDATION_SKIP_AUTH'] = '1'
    os.environ['PYTHONWARNINGS'] = 'ignore'

    print("✓ 基本系统警告抑制已启用")

# Configure the PyTorch environment to suppress NNPACK warnings
try:
    from kidsbuddy.utils.torch_config import configure_torch_environment
    configure_torch_environment(suppress_warnings=True, configure_logging=True)
    print("✓ PyTorch环境配置完成")
except ImportError:
    # If the config module cannot be imported, fall back to basic warning suppression
    import warnings
    import logging

    warnings.filterwarnings("ignore", message=".*NNPACK.*")
    warnings.filterwarnings("ignore", message=".*Could not initialize NNPACK.*")

    os.environ['NNPACK_DISABLE'] = '1'
    os.environ['PYTORCH_DISABLE_NNPACK_CHECK'] = '1'

    logging.getLogger('torch').setLevel(logging.ERROR)
    logging.getLogger('ultralytics').setLevel(logging.ERROR)

    print("✓ 基本PyTorch警告抑制已启用")

try:
    from kidsbuddy.vision.attention_detector import AttentionDetector
except ImportError as e:
    print(f"Import Error: {e}")
    print("Please ensure the project path is correct and all dependencies are installed")
    sys.exit(1)


def put_chinese_text(img, text, position, font_size=20, color=(255, 255, 255)):
    """
    Draw Chinese text on an OpenCV image, with layered fallbacks.

    Parameters:
        img: OpenCV image (BGR format)
        text: the (possibly Chinese) text to draw
        position: text position (x, y), top-left anchored
        font_size: font size in points
        color: text color in (B, G, R) order, matching OpenCV convention

    Returns:
        The image with the text drawn on it (BGR format).
    """
    try:
        # Replace emoji that common CJK fonts render as mojibake.
        clean_text = text.replace("⚠️", "[警告]").replace("✓", "[正常]").replace("💡", "[建议]")

        # Convert the OpenCV BGR image to a PIL RGB image for text rendering.
        img_pil = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        draw = ImageDraw.Draw(img_pil)

        # Try to load a font that can actually render CJK glyphs.
        font = None
        font_paths = [
            "/System/Library/Fonts/PingFang.ttc",
            "/System/Library/Fonts/STHeiti Light.ttc",
            "/System/Library/Fonts/Hiragino Sans GB.ttc",
            "/System/Library/Fonts/Arial Unicode MS.ttf",  # macOS
            "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",  # Linux
            "/usr/share/fonts/truetype/liberation/LiberationSans-Regular.ttf",  # Linux
            "C:/Windows/Fonts/msyh.ttc",  # Windows, Microsoft YaHei
            "C:/Windows/Fonts/simhei.ttf",  # Windows, SimHei
            "C:/Windows/Fonts/simsun.ttc",  # Windows, SimSun
        ]

        for font_path in font_paths:
            if os.path.exists(font_path):
                try:
                    font = ImageFont.truetype(font_path, font_size)
                    # Verify the font renders CJK text with non-zero width.
                    test_bbox = draw.textbbox((0, 0), "测试", font=font)
                    if test_bbox[2] > test_bbox[0]:
                        break
                    # Font loaded but cannot render CJK: discard it and keep
                    # searching, so a failing font is never used by accident.
                    font = None
                except Exception:
                    font = None
                    continue

        # If no suitable font was found, fall back to PIL's default font.
        if font is None:
            try:
                font = ImageFont.load_default()
            except Exception:
                # Last resort: draw with OpenCV's built-in font.
                return put_text_opencv_fallback(img, clean_text, position, font_size, color)

        # PIL expects RGB but `color` is documented as (B, G, R): swap the
        # channels so the rendered text color matches the OpenCV drawing calls.
        draw.text(position, clean_text, font=font, fill=(color[2], color[1], color[0]))

        # Convert back to OpenCV's BGR format.
        img_cv = cv2.cvtColor(np.array(img_pil), cv2.COLOR_RGB2BGR)
        return img_cv

    except Exception as e:
        # Any unexpected failure: fall back to the OpenCV renderer.
        print(f"PIL text rendering failed: {e}, using OpenCV fallback")
        clean_text = text.replace("⚠️", "[警告]").replace("✓", "[正常]").replace("💡", "[建议]")
        return put_text_opencv_fallback(img, clean_text, position, font_size, color)


def put_text_opencv_fallback(img, text, position, font_size, color):
    """
    Fallback renderer: draw text with OpenCV's built-in Hershey font.

    Used when PIL-based rendering is unavailable; always returns the image,
    drawn on or not, so callers can chain it safely.
    """
    try:
        # Map the PIL-style point size onto OpenCV's scale/thickness model.
        scale = font_size / 30.0
        stroke = max(1, int(font_size / 15))

        cv2.putText(img, text, position, cv2.FONT_HERSHEY_SIMPLEX,
                    scale, color, stroke, cv2.LINE_AA)
    except Exception as e:
        print(f"OpenCV fallback also failed: {e}")
    return img


def main():
    """Run the real-time attention-detection demo loop until the user quits."""
    print("=" * 50)
    print("儿童学习注意力检测演示")
    print("=" * 50)

    # Initialize the detector.
    print("正在初始化注意力检测器...")
    try:
        detector = AttentionDetector(
            model_path="yolo11n-pose.pt",  # the model file must exist at this path
            conf_threshold=0.3,
            history_window=30,
            movement_threshold=5.0,
            stability_threshold=90
        )
        print("✓ 检测器初始化成功")
    except Exception as e:
        print(f"✗ 检测器初始化失败: {e}")
        print("请确保 YOLO 模型文件存在且路径正确")
        return

    # Initialize the camera.
    print("正在初始化摄像头...")
    cap = cv2.VideoCapture(0)

    if not cap.isOpened():
        print("✗ 无法打开摄像头")
        print("请检查摄像头连接或尝试其他摄像头ID")
        return

    # Request a larger capture resolution for a better display.
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    print("✓ 摄像头初始化成功")

    print("\n开始检测...")
    print("按 'q' 退出，按 'r' 重置历史数据")

    # Start with a clean detection history.
    detector.reset_history()

    # Status -> display color map (BGR format).
    status_colors = {
        "专注学习": (0, 255, 0),      # green
        "基本专注": (0, 200, 100),    # light green
        "轻微分心": (0, 255, 255),    # yellow
        "明显分心": (0, 165, 255),    # orange
        "严重分心": (0, 0, 255),      # red
        "睡觉/疲劳": (128, 0, 128),   # purple
        "注意力不集中": (0, 100, 255),  # orange-red
        "无法判断": (128, 128, 128),   # gray
        "未检测到人": (64, 64, 64)     # dark gray
    }

    frame_count = 0
    start_time = time.time()
    fps = 0  # initialized so the finally-block stats are safe even on early exit

    # Create the resizable display window ONCE, before the loop.
    # (Re-creating and force-resizing it every frame overrides any resize
    # the user makes and wastes work.)
    cv2.namedWindow('Attention Detection Demo', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('Attention Detection Demo', 1280, 720)

    try:
        while True:
            # Grab the next frame.
            ret, frame = cap.read()
            if not ret:
                print("无法读取摄像头帧")
                break

            # Mirror the frame so the preview behaves like a mirror.
            frame = cv2.flip(frame, 1)

            # Run the detection pipeline on this frame.
            status, reasons, keypoints_data, features = detector.process_frame(
                frame)

            # Draw pose keypoints for the detected person.
            if keypoints_data['kpts'] is not None and keypoints_data['confs'] is not None:
                kpts = keypoints_data['kpts']
                confs = keypoints_data['confs']

                # Only draw keypoints with high confidence.
                for kpt, conf in zip(kpts, confs):
                    if conf > 0.5:
                        x, y = int(kpt[0]), int(kpt[1])
                        cv2.circle(frame, (x, y), 3, (0, 0, 255), -1)

                # Draw the person's bounding box.
                if keypoints_data['bbox'] is not None:
                    bbox = keypoints_data['bbox']
                    x1, y1, x2, y2 = map(int, bbox[:4])
                    cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 2)

            # Draw detected objects (people are excluded; they are drawn above).
            detected_objects = keypoints_data.get('detected_objects', [])
            for obj in detected_objects:
                bbox = obj['bbox']
                x1, y1, x2, y2 = map(int, bbox[:4])
                class_name = obj['class_name']
                confidence = obj['confidence']

                # Skip person-like classes.
                if class_name.lower() in ['person', 'people', 'human', 'man', 'woman', 'child', 'boy', 'girl']:
                    continue

                # Green for allowed objects, red for forbidden ones.
                is_allowed, obj_info = detector.behavior_config.is_object_allowed(
                    class_name)
                obj_color = (0, 255, 0) if is_allowed else (0, 0, 255)

                # Draw the object's bounding box.
                cv2.rectangle(frame, (x1, y1), (x2, y2), obj_color, 2)

                # Label the object in Chinese to avoid mojibake.
                try:
                    obj_name = obj_info.get('name', class_name)
                    label_text = f"{obj_name} ({confidence:.2f})"
                    frame = put_chinese_text(frame, label_text, (x1, y1-25),
                                             font_size=12, color=obj_color)
                except Exception:
                    # If Chinese rendering fails, fall back to an English label.
                    label = f"{class_name} ({confidence:.2f})"
                    cv2.putText(frame, label, (x1, y1-10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, obj_color, 1)

            # ===== Attention panel (top-left) =====
            color = status_colors.get(status, (255, 255, 255))

            # Panel title.
            frame = put_chinese_text(frame, "=== 专注力检测 ===", (15, 25),
                                     font_size=18, color=(255, 255, 255))

            # Attention status line.
            frame = put_chinese_text(frame, f"状态: {status}", (15, 55),
                                     font_size=20, color=color)

            # Focus score, rescaled to a 0-100 scale and colored by band.
            focus_score = features.get('focus_score')
            if focus_score is not None:
                score_100 = int(focus_score * 100)
                if score_100 >= 70:
                    score_color = (0, 255, 0)  # green
                elif score_100 >= 50:
                    score_color = (0, 255, 255)  # yellow
                else:
                    score_color = (0, 165, 255)  # orange
                frame = put_chinese_text(frame, f"专注度: {score_100}/100分", (15, 85),
                                         font_size=18, color=score_color)

            # Attention-related reasons (show at most the two most important).
            focus_y_offset = 115
            focus_reasons = []
            for reason in reasons[:2]:
                if any(keyword in reason for keyword in ['专注度', '头部', '视线', '东张西望', '偏移']):
                    focus_reasons.append(reason)

            for reason in focus_reasons[:2]:
                frame = put_chinese_text(frame, f"• {reason}", (15, focus_y_offset),
                                         font_size=14, color=(0, 200, 255))
                focus_y_offset += 25

            # ===== Posture panel (bottom-left) =====
            posture_start_y = frame.shape[0] - 280  # start 280 px above the bottom

            # Panel title.
            frame = put_chinese_text(frame, "=== 坐姿检测 ===", (15, posture_start_y),
                                     font_size=18, color=(255, 255, 255))

            posture_y_offset = posture_start_y + 30

            # Human-readable labels derived from body features.
            posture_labels = detector.get_posture_labels(features)

            # Posture quality label and optional score.
            posture_label = posture_labels.get('posture_label')
            posture_score = features.get('posture_score')
            if posture_label:
                posture_color = (0, 255, 0) if posture_label in ['优秀', '良好'] else (
                    0, 165, 255) if posture_label == '一般' else (0, 0, 255)

                if posture_score is not None:
                    frame = put_chinese_text(frame, f"坐姿: {posture_label} ({posture_score:.2f})", (15, posture_y_offset),
                                             font_size=16, color=posture_color)
                else:
                    frame = put_chinese_text(frame, f"坐姿: {posture_label}", (15, posture_y_offset),
                                             font_size=16, color=posture_color)
                posture_y_offset += 25

            # Head pose label (posture-related).
            head_label = posture_labels.get('head_label')
            if head_label:
                head_color = (0, 255, 0) if head_label in ['优秀', '良好'] else (
                    0, 165, 255) if head_label == '一般' else (0, 0, 255)
                frame = put_chinese_text(frame, f"头部姿态: {head_label}", (15, posture_y_offset),
                                         font_size=16, color=head_color)
                posture_y_offset += 25

            # Head tilt/twist state, merged onto one line to save space.
            head_tilt_status = posture_labels.get('head_tilt_status')
            head_twist_status = posture_labels.get('head_twist_status')

            head_status_parts = []
            if head_tilt_status and head_tilt_status != '头部端正':
                head_status_parts.append(head_tilt_status)
            if head_twist_status and head_twist_status != '头部正面':
                head_status_parts.append(head_twist_status)

            if head_status_parts:
                head_status_text = " | ".join(head_status_parts)
                # Orange when only mild ('轻微') deviations, red otherwise.
                # (Loop variable renamed so it does not shadow `status` above.)
                status_color = (0, 165, 255) if any('轻微' in part for part in head_status_parts) else (0, 0, 255)
                frame = put_chinese_text(frame, f"头部状态: {head_status_text}", (15, posture_y_offset),
                                         font_size=14, color=status_color)
                posture_y_offset += 25

            # Body symmetry, shown only when it needs attention.
            symmetry_label = posture_labels.get('symmetry_label')
            if symmetry_label and symmetry_label not in ['优秀', '良好']:
                symmetry_color = (0, 165, 255) if symmetry_label == '一般' else (0, 0, 255)
                frame = put_chinese_text(frame, f"身体对称: {symmetry_label}", (15, posture_y_offset),
                                         font_size=16, color=symmetry_color)
                posture_y_offset += 25

            # Person-count result (displayed in the posture panel).
            person_count = keypoints_data.get('person_count', 0)
            if person_count > 1:
                frame = put_chinese_text(frame, f"[警告] 检测到{person_count}个人", (15, posture_y_offset),
                                         font_size=14, color=(0, 0, 255))
            elif person_count == 1:
                frame = put_chinese_text(frame, "[正常] 单人环境", (15, posture_y_offset),
                                         font_size=14, color=(0, 255, 0))

            # ===== Object detection and other info (middle panel) =====
            object_analysis = keypoints_data.get('object_analysis', {})
            detected_objects = keypoints_data.get('detected_objects', [])

            middle_y_start = 200
            middle_y_offset = middle_y_start

            # Object count (only when something was detected).
            if detected_objects:
                frame = put_chinese_text(frame, f"检测到{len(detected_objects)}个物品", (15, middle_y_offset),
                                         font_size=14, color=(0, 255, 255))
                middle_y_offset += 25

            # Show at most one warning to save screen space.
            if object_analysis.get('warnings'):
                warning = object_analysis['warnings'][0]
                frame = put_chinese_text(frame, f"[警告] {warning}", (15, middle_y_offset),
                                         font_size=14, color=(0, 0, 255))
                middle_y_offset += 25

            # Show at most one suggestion.
            if object_analysis.get('suggestions'):
                suggestion = object_analysis['suggestions'][0]
                frame = put_chinese_text(frame, f"[建议] {suggestion}", (15, middle_y_offset),
                                         font_size=14, color=(0, 255, 255))
                middle_y_offset += 25

            # Compute and overlay the running FPS.
            frame_count += 1
            current_time = time.time()
            elapsed_time = current_time - start_time
            fps = frame_count / elapsed_time if elapsed_time > 0 else 0
            cv2.putText(frame, f"FPS: {fps:.1f}", (frame.shape[1]-120, 35),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)

            # ===== Control hints (bottom-right) =====
            frame = put_chinese_text(frame, "按 'q' 退出，按 'r' 重置",
                                     (frame.shape[1]-250, frame.shape[0]-25),
                                     font_size=16, color=(0, 255, 0))

            # Show the annotated frame.
            cv2.imshow('Attention Detection Demo', frame)

            # Handle keys — several ways to quit.
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q') or key == ord('Q') or key == 27:  # q, Q, or ESC
                print("正在退出...")
                break
            elif key == ord('r') or key == ord('R'):
                detector.reset_history()
                frame_count = 0
                start_time = time.time()
                print("✓ 已重置历史数据")

            # Exit if the user closed the window.
            try:
                if cv2.getWindowProperty('Attention Detection Demo', cv2.WND_PROP_VISIBLE) < 1:
                    print("窗口已关闭，正在退出...")
                    break
            except cv2.error:
                print("窗口已关闭，正在退出...")
                break

    except KeyboardInterrupt:
        print("\n检测被用户中断")

    finally:
        # Release camera and GUI resources.
        cap.release()
        cv2.destroyAllWindows()

        # Compute final statistics.
        final_elapsed_time = time.time() - start_time
        final_fps = frame_count / final_elapsed_time if final_elapsed_time > 0 else 0

        # Print the final summary.
        print(f"\n检测完成:")
        print(f"总帧数: {frame_count}")
        print(f"运行时长: {final_elapsed_time:.1f}秒")
        print(f"平均FPS: {final_fps:.1f}")

        # Attention summary over the most recent window, once enough frames ran.
        if frame_count > 10:
            summary = detector.get_attention_summary(
                window_size=min(30, frame_count))
            if "focus_percentage" in summary:
                print(f"专注度: {summary['focus_percentage']:.1f}%")


# Script entry point: run the demo only when executed directly.
if __name__ == "__main__":
    main()
