#!/usr/bin/env python3
"""
实时文字区域检测测试

基于 book_content_detection_demo.py 的逻辑，测试智能文字区域检测器的实时性能。
包含手指检测、书本检测和文字区域检测的完整流程。
"""

import os
import sys
import cv2
import numpy as np
import time
import argparse
import math
from typing import List, Tuple, Optional, Dict, Any
from dataclasses import dataclass

# 确保能够导入kidsbuddy包
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# 导入智能文字区域检测器
from kidsbuddy.vision import TextRegionDetector, TextRegionResult

# MediaPipe导入
try:
    import mediapipe as mp
    MEDIAPIPE_AVAILABLE = True
    print("✓ MediaPipe已成功导入")
except ImportError:
    MEDIAPIPE_AVAILABLE = False
    mp = None
    print("⚠ MediaPipe未安装，手指检测功能将被禁用")

# YOLO导入
try:
    from ultralytics import YOLO
    YOLO_AVAILABLE = True
    print("✓ YOLO已成功导入")
except ImportError:
    YOLO_AVAILABLE = False
    print("⚠ YOLO未安装，将使用备用书本检测")


@dataclass
class DetectionBox:
    """A detected bounding box plus the label/color used to render it.

    Coordinates are pixels in the image the detection was run on.
    """
    x: int  # left edge
    y: int  # top edge
    width: int
    height: int
    confidence: float  # detector confidence score
    label: str  # human-readable tag drawn next to the box
    color: Tuple[int, int, int]  # BGR color used when drawing


@dataclass
class FingerInfo:
    """Per-hand finger data extracted from MediaPipe hand landmarks."""
    tip_position: Tuple[int, int]  # index fingertip (x, y) in image pixels
    direction: Tuple[float, float]  # unit vector of the pointing direction
    landmarks: List[Tuple[int, int]]  # all 21 hand keypoints in image pixels
    confidence: float  # handedness classification score
    hand_type: str  # "Left" or "Right"


class RealtimeTextDetectionSystem:
    """实时文字检测系统"""
    
    def __init__(self, camera_id: int = 0, resolution: Tuple[int, int] = (1920, 1080)):
        """
        Initialize the realtime detection system.

        Args:
            camera_id: OpenCV camera index to open.
            resolution: Requested capture resolution (width, height); the
                driver may report a different actual resolution.

        Raises:
            RuntimeError: if the camera cannot be opened (from _setup_camera).
        """
        self.camera_id = camera_id
        self.resolution = resolution
        
        # Open the capture device; _setup_camera fills in self.width,
        # self.height and self.fps_cap with the values actually in effect.
        self.cap = cv2.VideoCapture(camera_id)
        self._setup_camera()
        
        # Finger / book / text detectors (availability depends on installed deps)
        self._setup_detectors()
        
        # Performance bookkeeping for the on-screen FPS overlay
        self.frame_count = 0
        self.start_time = time.time()
        self.fps = 0
        self.processing_times = []  # rolling window of per-frame latencies (ms)
        
        # Size of the preview window (detection also runs at this size)
        self.display_width = 1280
        self.display_height = 720
        
        print(f"✓ 实时文字检测系统初始化完成")
        print(f"  摄像头: {self.width}x{self.height} @ {self.fps_cap}fps")
        print(f"  显示: {self.display_width}x{self.display_height}")
    
    def _setup_camera(self):
        """Configure the capture device and record the parameters in effect.

        Raises:
            RuntimeError: if the capture device failed to open.
        """
        if not self.cap.isOpened():
            raise RuntimeError("无法打开摄像头")

        # Request resolution, frame rate and a minimal buffer (low latency).
        requested = (
            (cv2.CAP_PROP_FRAME_WIDTH, self.resolution[0]),
            (cv2.CAP_PROP_FRAME_HEIGHT, self.resolution[1]),
            (cv2.CAP_PROP_FPS, 30),
            (cv2.CAP_PROP_BUFFERSIZE, 1),
        )
        for prop, value in requested:
            self.cap.set(prop, value)

        # Read back what the driver actually applied.
        self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.fps_cap = self.cap.get(cv2.CAP_PROP_FPS)
    
    def _setup_detectors(self):
        """Initialize the finger, book and text-region detectors.

        Each detector degrades gracefully: when its dependency is missing
        the corresponding attribute is set to None and that feature is
        skipped at runtime.
        """
        # Finger detector (MediaPipe Hands, video mode with tracking)
        if MEDIAPIPE_AVAILABLE:
            self.mp_hands = mp.solutions.hands
            self.mp_drawing = mp.solutions.drawing_utils
            self.hands = self.mp_hands.Hands(
                static_image_mode=False,
                max_num_hands=2,
                min_detection_confidence=0.7,
                min_tracking_confidence=0.5
            )
            print("✓ MediaPipe手指检测器已初始化")
        else:
            self.hands = None
            print("✗ 手指检测器不可用")
        
        # Book detector (YOLO)
        if YOLO_AVAILABLE:
            try:
                # Candidate local model files, most specific path first;
                # falls through to the pretrained yolov8n weights below.
                model_paths = [
                    "kidsbuddy/models/yolov11x.pt",
                    "../kidsbuddy/models/yolov11x.pt",
                    "yolov11x.pt",
                    "yolov8n.pt"
                ]
                
                self.yolo_model = None
                for path in model_paths:
                    if os.path.exists(path):
                        self.yolo_model = YOLO(path)
                        print(f"✓ YOLO模型已加载: {path}")
                        break
                
                if self.yolo_model is None:
                    # No local file found; YOLO downloads the weights itself.
                    self.yolo_model = YOLO("yolov8n.pt")
                    print("✓ 使用预训练YOLO模型")
                    
            except Exception as e:
                print(f"✗ YOLO模型加载失败: {e}")
                self.yolo_model = None
        else:
            self.yolo_model = None
        
        # Text region detector - tuned for higher precision in realtime use
        self.text_detector = TextRegionDetector(
            debug_mode=False,  # debug visualizations disabled for realtime use
            min_text_area=200,  # larger minimum area to reject noise blobs
            min_char_size=15,   # larger minimum character size
            expand_margin=10,   # smaller expansion margin for tighter boxes
            horizontal_connect_kernel=10,  # smaller horizontal merge kernel
            vertical_connect_kernel=3      # smaller vertical merge kernel
        )
        print("✓ 智能文字区域检测器已初始化")
    
    def detect_fingers(self, image: np.ndarray) -> List[FingerInfo]:
        """Detect index fingers and their pointing directions.

        The frame is mirrored before detection (webcam mirror correction);
        landmark x-coordinates are mapped back into the original image
        afterwards. Returns an empty list when MediaPipe is unavailable.
        """
        if not MEDIAPIPE_AVAILABLE or self.hands is None:
            return []

        original_h, original_w = image.shape[:2]

        # Run MediaPipe on the horizontally flipped frame.
        mirrored = cv2.flip(image, 1)
        results = self.hands.process(cv2.cvtColor(mirrored, cv2.COLOR_BGR2RGB))

        detections = []
        if not (results.multi_hand_landmarks and results.multi_handedness):
            return detections

        mirror_h, mirror_w = mirrored.shape[:2]
        for hand_landmarks, handedness in zip(results.multi_hand_landmarks,
                                              results.multi_handedness):
            # Handedness classified on the mirrored frame is already correct.
            hand_label = handedness.classification[0].label
            hand_score = handedness.classification[0].score

            # Pixel coordinates in the mirrored frame, with x un-mirrored
            # back into the original image's coordinate system.
            landmarks = [
                (original_w - 1 - int(lm.x * mirror_w), int(lm.y * mirror_h))
                for lm in hand_landmarks.landmark
            ]

            # A full MediaPipe hand has 21 keypoints; skip partial results.
            if len(landmarks) < 21:
                continue

            tip = landmarks[8]  # index fingertip
            pip = landmarks[6]  # index finger PIP joint

            # Pointing direction: fingertip relative to the PIP joint.
            dx = tip[0] - pip[0]
            dy = tip[1] - pip[1]
            norm = math.sqrt(dx * dx + dy * dy)
            if norm > 0:
                dx, dy = dx / norm, dy / norm

            detections.append(FingerInfo(
                tip_position=tip,
                direction=(dx, dy),
                landmarks=landmarks,
                confidence=hand_score,
                hand_type=hand_label,
            ))

        return detections
    
    def detect_books(self, image: np.ndarray) -> List[DetectionBox]:
        """Detect books via YOLO, falling back to contour analysis.

        Boxes pass if their class is 'book' or their geometry looks
        book-like; when YOLO finds nothing (or is unavailable) the
        contour-based fallback is used instead.
        """
        detections = []

        if self.yolo_model is not None:
            try:
                for result in self.yolo_model(image, conf=0.25, verbose=False):
                    boxes = getattr(result, 'boxes', None)
                    if boxes is None:
                        continue
                    for box in boxes:
                        x1, y1, x2, y2 = box.xyxy[0].cpu().numpy()
                        conf = float(box.conf[0].cpu().numpy())
                        cls = int(box.cls[0].cpu().numpy())

                        # Keep book-class hits and book-shaped candidates.
                        if not (self._is_book_class(cls)
                                or self._is_potential_book(x1, y1, x2, y2, image.shape)):
                            continue
                        detections.append(DetectionBox(
                            x=int(x1), y=int(y1),
                            width=int(x2 - x1), height=int(y2 - y1),
                            confidence=conf,
                            label="Book",
                            color=(0, 255, 0),
                        ))
            except Exception as e:
                print(f"YOLO检测出错: {e}")

        # Contour-based fallback when nothing was found.
        return detections if detections else self._fallback_book_detection(image)
    
    def _is_book_class(self, class_id: int) -> bool:
        """判断是否为书本类别"""
        book_class_ids = {73}  # COCO数据集中的book类别ID
        return class_id in book_class_ids
    
    def _is_potential_book(self, x1: float, y1: float, x2: float, y2: float, image_shape: tuple) -> bool:
        """判断是否为潜在的书本"""
        h, w = image_shape[:2]
        box_w = x2 - x1
        box_h = y2 - y1
        area = box_w * box_h
        relative_area = area / (w * h)
        aspect_ratio = box_w / box_h if box_h > 0 else 0
        
        return (0.05 < relative_area < 0.8 and 0.3 < aspect_ratio < 4.0)
    
    def _fallback_book_detection(self, image: np.ndarray) -> List[DetectionBox]:
        """Contour-based book detection used when YOLO finds nothing.

        Pipeline: grayscale -> blur -> Canny -> dilation -> external
        contours, filtered by area and aspect ratio. Returns at most the
        two largest candidates.
        """
        img_h, img_w = image.shape[:2]

        # Build an edge map with small gaps closed by dilation.
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(cv2.GaussianBlur(gray, (5, 5), 0), 50, 150)
        dilated = cv2.dilate(edges, np.ones((5, 5), np.uint8), iterations=2)

        contours, _ = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        # Plausible book sizes: 5%-90% of the frame.
        min_area = img_w * img_h * 0.05
        max_area = img_w * img_h * 0.9

        candidates = []
        for contour in contours:
            area = cv2.contourArea(contour)
            if not (min_area < area < max_area):
                continue
            x, y, box_w, box_h = cv2.boundingRect(contour)
            ratio = box_w / box_h if box_h > 0 else 0
            if not (0.2 < ratio < 5.0):
                continue
            # Confidence grows with contour area, capped at 0.8.
            candidates.append(DetectionBox(
                x=x, y=y, width=box_w, height=box_h,
                confidence=min(0.8, area / (img_w * img_h * 0.3)),
                label="Book (Fallback)",
                color=(0, 255, 0),
            ))

        # Largest boxes first; keep at most two.
        candidates.sort(key=lambda b: b.width * b.height, reverse=True)
        return candidates[:2]

    def find_closest_book(self, finger_info: FingerInfo, books: List[DetectionBox]) -> Optional[DetectionBox]:
        """找到与手指最近的书本"""
        if not finger_info or not books:
            return None

        tip_x, tip_y = finger_info.tip_position
        min_distance = float('inf')
        closest_book = None

        for book in books:
            book_center_x = book.x + book.width // 2
            book_center_y = book.y + book.height // 2
            distance = math.sqrt((tip_x - book_center_x)**2 + (tip_y - book_center_y)**2)

            if distance < min_distance:
                min_distance = distance
                closest_book = book

        return closest_book

    def detect_text_region(self, image: np.ndarray, finger_info: FingerInfo,
                          books: List[DetectionBox]) -> Optional[DetectionBox]:
        """Locate the text region the finger points at inside a detected book.

        Returns the first successful detection mapped back into full-frame
        coordinates, or None when the finger is outside every book or the
        detector finds nothing.
        """
        if not finger_info or not books:
            return None

        tip_x, tip_y = finger_info.tip_position
        dx, dy = finger_info.direction

        for book in books:
            # Crop the book region out of the full frame.
            crop = image[book.y:book.y + book.height, book.x:book.x + book.width]
            if crop.size == 0:
                continue

            # Fingertip expressed in book-local coordinates.
            local_x = tip_x - book.x
            local_y = tip_y - book.y

            # Skip books the fingertip is not actually over.
            if not (0 <= local_x < book.width and 0 <= local_y < book.height):
                continue

            # Short search length keeps the detector focused near the fingertip.
            result = self.text_detector.detect_pointed_text_region(
                crop,
                (local_x, local_y),
                (dx, dy),
                search_length=80
            )

            if result.success and result.text_box:
                bx, by, bw, bh = result.text_box
                # Map the detected box back into full-frame coordinates.
                return DetectionBox(
                    x=book.x + bx, y=book.y + by,
                    width=bw, height=bh,
                    confidence=result.confidence,
                    label=f"Text ({result.confidence:.2f})",
                    color=(0, 255, 255)  # yellow in BGR
                )

        return None

    def process_frame(self, image: np.ndarray) -> Dict:
        """Run the full detection pipeline on one frame and time it.

        Returns a dict with the detected fingers, the (at most one) book
        closest to the first finger, the pointed text region, and latency
        stats in milliseconds.
        """
        t0 = time.time()

        # Detect fingers and candidate books independently.
        finger_infos = self.detect_fingers(image)
        all_books = self.detect_books(image)

        # Keep only the single book closest to the first detected finger.
        books = []
        if finger_infos and all_books:
            nearest = self.find_closest_book(finger_infos[0], all_books)
            if nearest:
                books = [nearest]

        # Text detection needs both a finger and a book.
        text_region = None
        if finger_infos and books:
            text_region = self.detect_text_region(image, finger_infos[0], books)

        elapsed_ms = (time.time() - t0) * 1000
        self.processing_times.append(elapsed_ms)

        # Rolling window: keep only the most recent 100 latency samples.
        if len(self.processing_times) > 100:
            self.processing_times.pop(0)

        return {
            'finger_infos': finger_infos,
            'books': books,
            'text_region': text_region,
            'processing_time': elapsed_ms,
            'avg_processing_time': sum(self.processing_times) / len(self.processing_times)
        }

    def draw_results(self, image: np.ndarray, results: Dict) -> np.ndarray:
        """Render books (green), hands (red/blue) and the text region (yellow).

        Draws on a copy; the input frame is left untouched.
        """
        canvas = image.copy()

        # Book bounding boxes with label and confidence.
        for book in results.get('books', []):
            top_left = (book.x, book.y)
            bottom_right = (book.x + book.width, book.y + book.height)
            cv2.rectangle(canvas, top_left, bottom_right, book.color, 3)
            cv2.putText(canvas, f"{book.label} {book.confidence:.2f}",
                       (book.x, book.y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, book.color, 2)

        # Hands: landmarks, pointing arrow and handedness label.
        for finger in results.get('finger_infos', []):
            for idx, (px, py) in enumerate(finger.landmarks):
                if idx == 8:  # index fingertip gets a larger red dot
                    cv2.circle(canvas, (px, py), 8, (0, 0, 255), -1)
                else:
                    cv2.circle(canvas, (px, py), 3, (255, 0, 0), -1)

            tip_x, tip_y = finger.tip_position
            arrow_len = 100
            arrow_end = (int(tip_x + finger.direction[0] * arrow_len),
                         int(tip_y + finger.direction[1] * arrow_len))
            cv2.arrowedLine(canvas, (tip_x, tip_y), arrow_end, (0, 0, 255), 3)

            cv2.putText(canvas, f"{finger.hand_type} Hand",
                       (tip_x + 10, tip_y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        # Pointed text region, if one was found.
        region = results.get('text_region')
        if region:
            cv2.rectangle(canvas,
                         (region.x, region.y),
                         (region.x + region.width, region.y + region.height),
                         region.color, 3)
            cv2.putText(canvas, region.label,
                       (region.x, region.y - 10),
                       cv2.FONT_HERSHEY_SIMPLEX, 0.7, region.color, 2)

        return canvas

    def run(self):
        """Main loop: capture frames, run detection, render, handle keys.

        Blocks until the user quits ('q'/ESC), the camera fails, or the
        process is interrupted; resources are always released via cleanup().
        """
        print("\n🚀 开始实时文字区域检测测试")
        print("=" * 50)
        print("控制说明:")
        print("- 'q' 或 ESC: 退出")
        print("- 's': 保存当前帧")
        print("- 'r': 重置统计信息")
        print("- 'h': 显示/隐藏帮助信息")
        print("=" * 50)

        cv2.namedWindow("Realtime Text Detection", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("Realtime Text Detection", self.display_width, self.display_height)

        show_help = True

        try:
            while True:
                ret, frame = self.cap.read()
                if not ret:
                    print("❌ 无法读取摄像头画面")
                    break

                # Detection runs on the display-sized frame so all drawn
                # coordinates match the preview window directly.
                display_frame = cv2.resize(frame, (self.display_width, self.display_height))

                results = self.process_frame(display_frame)
                vis_image = self.draw_results(display_frame, results)

                # Refresh the FPS estimate roughly once per second.
                self.frame_count += 1
                elapsed_time = time.time() - self.start_time
                if elapsed_time >= 1.0:
                    self.fps = self.frame_count / elapsed_time
                    self.frame_count = 0
                    self.start_time = time.time()

                self._draw_status_info(vis_image, results, show_help)

                cv2.imshow("Realtime Text Detection", vis_image)

                key = cv2.waitKey(1) & 0xFF
                if key == 27 or key == ord('q'):  # ESC or 'q' quits
                    break
                elif key == ord('s'):  # save the annotated frame
                    timestamp = time.strftime("%Y%m%d_%H%M%S")
                    filename = f"text_detection_{timestamp}.jpg"
                    cv2.imwrite(filename, vis_image)
                    # Fix: report the actual saved path (this previously
                    # printed a literal "(unknown)" placeholder).
                    print(f"📸 已保存: {filename}")
                elif key == ord('r'):  # reset latency/FPS statistics
                    self.processing_times.clear()
                    self.frame_count = 0
                    self.start_time = time.time()
                    print("🔄 统计信息已重置")
                elif key == ord('h'):  # toggle the help panel
                    show_help = not show_help

        except KeyboardInterrupt:
            print("\n⚠ 用户中断")

        finally:
            self.cleanup()

    def _draw_status_info(self, image: np.ndarray, results: Dict, show_help: bool):
        """Draw the status panel and, optionally, the help panel in place.

        Each panel blends a fresh snapshot of the current image, so text
        drawn by an earlier panel is not washed out by a later blend.
        """
        # Basic status readout for the top-left panel.
        info_texts = [
            f"FPS: {self.fps:.1f}",
            f"Books: {len(results.get('books', []))}",
            f"Fingers: {len(results.get('finger_infos', []))}",
            f"Text: {'Yes' if results.get('text_region') else 'No'}",
            f"Process: {results.get('processing_time', 0):.1f}ms",
            f"Avg: {results.get('avg_processing_time', 0):.1f}ms"
        ]

        # Semi-transparent dark background for the status panel.
        overlay = image.copy()
        cv2.rectangle(overlay, (10, 10), (350, 200), (0, 0, 0), -1)
        cv2.addWeighted(overlay, 0.7, image, 0.3, 0, image)

        for i, text in enumerate(info_texts):
            cv2.putText(image, text, (20, 40 + i * 25),
                       cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)

        if show_help:
            help_texts = [
                "Controls:",
                "Q/ESC - Quit",
                "S - Save frame",
                "R - Reset stats",
                "H - Toggle help"
            ]

            help_y_start = image.shape[0] - 150
            # Fix: blend a FRESH snapshot for the help panel. Re-blending the
            # stale `overlay` (captured before the status text was drawn)
            # faded the status text and double-darkened its background.
            help_overlay = image.copy()
            cv2.rectangle(help_overlay, (10, help_y_start - 10), (200, image.shape[0] - 10), (0, 0, 0), -1)
            cv2.addWeighted(help_overlay, 0.7, image, 0.3, 0, image)

            for i, text in enumerate(help_texts):
                cv2.putText(image, text, (20, help_y_start + 20 + i * 20),
                           cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

    def cleanup(self):
        """Release the camera, close all windows and print final statistics."""
        if self.cap:
            self.cap.release()
        cv2.destroyAllWindows()

        # Final latency/FPS summary for the session.
        print("\n📊 检测统计:")
        print(f"  平均FPS: {self.fps:.1f}")
        if self.processing_times:
            samples = self.processing_times
            print(f"  平均处理时间: {sum(samples) / len(samples):.1f}ms")
            print(f"  最大处理时间: {max(samples):.1f}ms")
            print(f"  最小处理时间: {min(samples):.1f}ms")

        print("🏁 实时检测测试结束")


def main():
    """CLI entry point: parse arguments and run the realtime detector.

    Returns:
        Process exit code: 0 on normal exit, 1 on startup failure.
    """
    parser = argparse.ArgumentParser(description="实时文字区域检测测试")
    parser.add_argument("--camera", type=int, default=0, help="摄像头ID (默认: 0)")
    parser.add_argument("--resolution", type=str, default="1920x1080",
                       help="摄像头分辨率 (默认: 1920x1080)")

    args = parser.parse_args()

    # Parse "WIDTHxHEIGHT"; the separator is now case-insensitive, so
    # "1280X720" works too (it previously fell back to the default).
    try:
        width, height = map(int, args.resolution.lower().split('x'))
        resolution = (width, height)
    except ValueError:
        print("❌ 分辨率格式错误，使用默认值 1920x1080")
        resolution = (1920, 1080)

    try:
        system = RealtimeTextDetectionSystem(
            camera_id=args.camera,
            resolution=resolution
        )
        system.run()

    except Exception as e:
        # Surface startup/runtime failures as a nonzero exit code.
        print(f"❌ 系统启动失败: {e}")
        return 1

    return 0


if __name__ == "__main__":
    # Use sys.exit: the builtin exit() is a site-module convenience and is
    # not guaranteed to be available (e.g. under `python -S`).
    sys.exit(main())
