
#!/usr/bin/env python3
"""
基于YOLO、TensorFlow与OpenCV的实时手势识别与虚拟绘图系统
主程序文件
"""

import cv2
import numpy as np
import tensorflow as tf
from ultralytics import YOLO
import json
import os
from collections import deque

class HandGestureRecognitionSystem:
    """Real-time hand gesture recognition and virtual drawing system.

    Per-frame pipeline: optional low-light enhancement -> YOLO hand
    detection -> CNN gesture classification on the hand ROI ->
    static/dynamic gesture handling -> drawing on a virtual canvas that
    is alpha-blended over the camera feed.
    """

    def __init__(self):
        """Initialize camera, models, label mappings, canvas and tunables.

        On an unrecoverable failure (no camera, missing YOLO weights) a
        message is printed and the constructor returns early, leaving
        ``self.initialized`` False; ``run()`` checks this flag before
        entering the main loop instead of crashing later on missing
        attributes.
        """
        # Set to True only once every required resource is ready.
        self.initialized = False

        # Open the camera, trying several device indices in order.
        self.cap = None
        for camera_index in [0, 1, 2]:
            self.cap = cv2.VideoCapture(camera_index)
            if self.cap.isOpened():
                print(f"成功打开摄像头索引 {camera_index}")
                break
            else:
                self.cap.release()

        # Bail out if no camera could be opened.
        if self.cap is None or not self.cap.isOpened():
            print("错误：无法打开任何摄像头")
            return

        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

        # Load the YOLO hand-detection weights.
        print("正在加载 YOLO 手部检测权重...")
        yolo_model_path = 'yolo/hand_yolov8n.pt'
        if not os.path.exists(yolo_model_path):
            print(f"错误：未找到手部检测模型 {yolo_model_path}")
            print("请确保模型文件存在")
            return
        self.yolo_model = YOLO(yolo_model_path)

        # Load the gesture classifier (Keras 3 format), preferring the
        # "best" checkpoint over the "final" one.
        print("正在加载手势分类模型...")
        model_candidates = [
            'models/static_gesture_classifier_best.keras',
            'models/static_gesture_classifier_final.keras',
        ]
        model_path = None

        for candidate in model_candidates:
            if os.path.exists(candidate):
                model_path = candidate
                print(f"找到模型: {candidate}")
                break

        if model_path:
            self.gesture_model = tf.keras.models.load_model(model_path)
        else:
            # The system still runs without the classifier: every
            # detection is then reported as "unknown".
            print("错误：手势分类模型不存在，请先训练模型")
            print("运行命令: python train_gesture_model.py")
            self.gesture_model = None

        # Class mapping saved at training time ({name: index}); the
        # reverse mapping ({index: name}) is what prediction uses.
        if os.path.exists('models/class_mapping.json'):
            with open('models/class_mapping.json', 'r') as f:
                self.class_mapping = json.load(f)
            self.reverse_mapping = {v: k for k, v in self.class_mapping.items()}
        else:
            print("警告：类别映射文件不存在")
            self.reverse_mapping = {}

        # English -> Chinese gesture label mapping for on-screen text.
        self.chinese_mapping = {
            "palm": "手掌",
            "l": "L形",
            "fist": "握拳",
            "fist_moved": "移动握拳",
            "thumb": "大拇指",
            "index": "食指",
            "ok": "OK手势",
            "palm_moved": "移动手掌",
            "c": "C形",
            "down": "向下",
            "unknown": "未知手势"
        }

        # Chinese-capable font path (common Windows fonts, tried in order).
        self.font_path = None
        for candidate in [
            r"C:\Windows\Fonts\msyh.ttc",   # Microsoft YaHei
            r"C:\Windows\Fonts\simhei.ttf", # SimHei
            r"C:\Windows\Fonts\simsun.ttc"  # SimSun
        ]:
            if os.path.exists(candidate):
                self.font_path = candidate
                break

        # Virtual canvas; allocated lazily once the first frame size is known.
        self.canvas = None
        self.canvas_initialized = False

        # Gesture state tracking.
        self.gesture_buffer = deque(maxlen=5)  # recent gestures for dynamic-gesture detection
        self.previous_hand_center = None

        # Drawing state.
        self.drawing = False
        self.pen_color = (0, 255, 0)  # default green (BGR)
        self.pen_thickness = 3

        # Pen color options (BGR).
        self.colors = [
            (0, 255, 0),    # green
            (255, 0, 0),    # blue
            (0, 0, 255),    # red
            (255, 255, 0),  # cyan
            (255, 0, 255),  # magenta
            (0, 255, 255)   # yellow
        ]
        self.color_index = 0

        # ---- Tunable parameters ----
        # YOLO detection threshold and candidate filtering.
        self.detection_conf_threshold = 0.25
        self.min_box_area = 5000  # reject tiny boxes, in pixels^2
        # Tracking: prefer candidates near the previous frame's center.
        self.tracking_max_distance = 200  # max distance (px) to previous center
        # Classification gate: only allow drawing when the "index"
        # class confidence exceeds this threshold.
        self.index_draw_conf_threshold = 0.6
        # ----------------------------

        self.initialized = True
        print("系统初始化完成！")

    def enhance_low_light(self, image):
        """Automatically enhance a low-light BGR image.

        Uses CLAHE (adaptive histogram equalization) on the L channel of
        the LAB color space when the mean brightness is low.

        Returns:
            (image, enhanced): the possibly-enhanced image and a flag
            indicating whether enhancement was applied.
        """
        # Convert to LAB color space.
        lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
        l, a, b = cv2.split(lab)

        # Measure brightness: enhance only below the threshold.
        mean_brightness = np.mean(l)

        if mean_brightness < 100:  # low-light threshold
            # Apply CLAHE to the L channel.
            clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
            l_enhanced = clahe.apply(l)

            # Merge channels back.
            lab_enhanced = cv2.merge([l_enhanced, a, b])
            enhanced = cv2.cvtColor(lab_enhanced, cv2.COLOR_LAB2BGR)

            # Optionally boost brightness further in very dark scenes.
            if mean_brightness < 60:  # extremely dark
                enhanced = cv2.convertScaleAbs(enhanced, alpha=1.3, beta=20)

            return enhanced, True  # enhanced image plus flag

        return image, False  # bright enough, leave untouched

    def preprocess_hand_image(self, hand_roi):
        """Preprocess a hand ROI for the classifier: resize to 224x224,
        scale to [0, 1] float32, and add a batch dimension."""
        # Resize to the model's input size.
        hand_roi = cv2.resize(hand_roi, (224, 224))
        # Normalize.
        hand_roi = hand_roi.astype('float32') / 255.0
        # Add batch dimension.
        hand_roi = np.expand_dims(hand_roi, axis=0)
        return hand_roi

    def put_chinese_text(self, image_bgr, text, position, color=(255, 0, 0), font_size=20):
        """Draw Chinese text on a BGR image via Pillow.

        OpenCV's putText cannot render CJK glyphs, so the image is
        round-tripped through Pillow. Falls back to ASCII-only
        cv2.putText if Pillow fails. Color is given in BGR.
        """
        try:
            from PIL import Image, ImageDraw, ImageFont
            # BGR -> RGB
            image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
            pil_img = Image.fromarray(image_rgb)
            draw = ImageDraw.Draw(pil_img)
            # Pick a font.
            if self.font_path and os.path.exists(self.font_path):
                font = ImageFont.truetype(self.font_path, font_size)
            else:
                font = ImageFont.load_default()
            # Pillow expects RGB colors.
            draw.text(position, text, font=font, fill=(color[2], color[1], color[0]))
            # RGB -> BGR
            return cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)
        except Exception:
            # Fallback: if Pillow drawing fails, strip to ASCII and use OpenCV.
            try:
                cv2.putText(image_bgr, text.encode('ascii', errors='ignore').decode('ascii'),
                            position, cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2)
            except Exception:
                pass
            return image_bgr

    def classify_gesture(self, hand_roi):
        """Classify a hand ROI.

        Returns:
            (gesture_name, confidence): class name (or "unknown") and the
            softmax confidence as a plain float.
        """
        if self.gesture_model is None:
            return "unknown", 0.0

        # Preprocess the image.
        processed_hand = self.preprocess_hand_image(hand_roi)

        # Predict.
        predictions = self.gesture_model.predict(processed_hand, verbose=0)

        # Take the highest-confidence class; convert the NumPy scalar to
        # a plain float so downstream formatting/comparison is uniform.
        class_idx = np.argmax(predictions[0])
        confidence = float(predictions[0][class_idx])

        # Map to the class name.
        gesture_name = self.reverse_mapping.get(class_idx, "unknown")

        return gesture_name, confidence

    def detect_dynamic_gesture(self, current_gesture, hand_center):
        """Detect dynamic gestures (click, swipe) from recent history.

        Returns "click", "swipe", or None.
        """
        # Append to the gesture history buffer.
        self.gesture_buffer.append(current_gesture)

        # Click: N consecutive "index" frames with little movement.
        if len(self.gesture_buffer) >= 5:
            if all(gesture == "index" for gesture in self.gesture_buffer):
                if self.previous_hand_center is not None:
                    # Movement since the previous frame.
                    movement = np.linalg.norm(
                        np.array(hand_center) - np.array(self.previous_hand_center)
                    )
                    if movement < 10:  # movement threshold (px)
                        return "click"

        # Swipe: consecutive "palm" frames.
        if len(self.gesture_buffer) >= 3:
            if all(gesture == "palm" for gesture in self.gesture_buffer):
                return "swipe"

        return None

    def handle_gesture_action(self, gesture, dynamic_gesture, hand_center):
        """Apply the effect of the recognized static/dynamic gesture:
        draw, stop drawing, clear the canvas, or cycle the pen color."""
        # Dynamic gestures first.
        if dynamic_gesture == "click":
            # Cycle the pen color.
            self.color_index = (self.color_index + 1) % len(self.colors)
            self.pen_color = self.colors[self.color_index]
            print(f"点击事件：切换到颜色 {self.color_index}")
            self.gesture_buffer.clear()  # reset history after a click

        # Static gestures.
        if gesture == "index":
            # Start / continue drawing.
            self.drawing = True
            if self.previous_hand_center is not None:
                # Draw a line segment on the canvas.
                cv2.line(self.canvas,
                        tuple(self.previous_hand_center),
                        tuple(hand_center),
                        self.pen_color,
                        self.pen_thickness)

        elif gesture == "fist":
            # Stop drawing.
            self.drawing = False

        elif gesture == "palm":
            # Clear the canvas on a palm swipe.
            if dynamic_gesture == "swipe":
                self.canvas = np.zeros_like(self.canvas)
                print("滑动清空画布")

        # Remember the hand center for the next frame.
        self.previous_hand_center = hand_center

    def run(self):
        """Run the main capture/detect/classify/draw loop until 'q'.

        Bails out immediately (with a message) if __init__ did not
        complete; always releases the camera and windows on exit, even
        when an exception escapes the loop.
        """
        if not getattr(self, "initialized", False):
            print("错误：系统未正确初始化，无法运行")
            return

        print("按 'q' 键退出程序")
        print("手势说明：")
        print("- 食指：开始绘图")
        print("- 握拳：停止绘图") 
        print("- 手掌：清空画布")
        print("- 连续食指（不动）：切换颜色")

        try:
            while True:
                # Grab a frame.
                ret, frame = self.cap.read()
                if not ret:
                    print("错误：无法读取帧")
                    break

                # Low-light enhancement.
                frame, is_enhanced = self.enhance_low_light(frame)

                # Lazily allocate the canvas at the frame's size.
                if not self.canvas_initialized:
                    self.canvas = np.zeros_like(frame)
                    self.canvas_initialized = True

                # Detect hands with YOLO and pick the best candidate box.
                hand_detected = False
                current_gesture = "unknown"
                confidence = 0.0
                hand_center = None
                selected_box_xyxy = None
                candidates = []

                results = self.yolo_model(
                    frame,
                    conf=self.detection_conf_threshold,
                    iou=0.45,
                    max_det=5,
                    verbose=False
                )

                for result in results:
                    boxes = result.boxes
                    if boxes is None or len(boxes) == 0:
                        continue
                    for b in boxes:
                        # Drop low-confidence boxes.
                        if hasattr(b, 'conf') and b.conf is not None:
                            conf_val = float(b.conf[0].cpu().numpy())
                            if conf_val < self.detection_conf_threshold:
                                continue
                        x1, y1, x2, y2 = b.xyxy[0].cpu().numpy().astype(int)
                        # Drop boxes that are too small.
                        area = max(0, (x2 - x1)) * max(0, (y2 - y1))
                        if area < self.min_box_area:
                            continue
                        # Expand the box so the whole hand is included.
                        margin = 20
                        xx1 = max(0, x1 - margin)
                        yy1 = max(0, y1 - margin)
                        xx2 = min(frame.shape[1], x2 + margin)
                        yy2 = min(frame.shape[0], y2 + margin)
                        roi = frame[yy1:yy2, xx1:xx2]
                        if roi.size == 0:
                            continue
                        # Classify the ROI.
                        g, conf_g = self.classify_gesture(roi)
                        center = ((x1 + x2) // 2, (y1 + y2) // 2)
                        # Score: classification confidence + proximity bonus
                        # toward the previous frame's hand center.
                        score = float(conf_g)
                        if self.previous_hand_center is not None:
                            dist = np.linalg.norm(np.array(center) - np.array(self.previous_hand_center))
                            if dist <= self.tracking_max_distance:
                                score += max(0.0, 1.0 - dist / self.tracking_max_distance) * 0.5
                        candidates.append((score, g, conf_g, center, (x1, y1, x2, y2)))

                if candidates:
                    candidates.sort(key=lambda x: x[0], reverse=True)
                    _, current_gesture, confidence, hand_center, raw_xyxy = candidates[0]
                    hand_detected = True
                    selected_box_xyxy = raw_xyxy

                # Visualize the selected box and its label.
                if hand_detected and selected_box_xyxy is not None:
                    rx1, ry1, rx2, ry2 = selected_box_xyxy
                    cv2.rectangle(frame, (rx1, ry1), (rx2, ry2), (255, 0, 0), 2)
                    chinese_gesture = self.chinese_mapping.get(current_gesture, "未知手势")
                    label = f"{chinese_gesture}: {confidence:.2f}"
                    frame = self.put_chinese_text(frame, label, (rx1, max(0, ry1-22)), color=(255, 0, 0), font_size=20)
                    cv2.circle(frame, hand_center, 5, (0, 255, 255), -1)

                # Detect dynamic gestures.
                dynamic_gesture = None
                if hand_detected and hand_center is not None:
                    dynamic_gesture = self.detect_dynamic_gesture(current_gesture, hand_center)

                # Act on the recognized gesture.
                if hand_detected:
                    # Only allow drawing when "index" confidence clears the
                    # threshold, to reduce accidental strokes.
                    effective_gesture = current_gesture
                    if current_gesture == "index" and confidence < self.index_draw_conf_threshold:
                        effective_gesture = "unknown"
                    self.handle_gesture_action(effective_gesture, dynamic_gesture, hand_center)
                else:
                    # No hand: stop drawing and drop tracking state.
                    self.drawing = False
                    self.previous_hand_center = None

                # Show the dynamic gesture status.
                if dynamic_gesture:
                    # Translate the dynamic gesture label to Chinese.
                    chinese_dynamic = "点击" if dynamic_gesture == "click" else "滑动"
                    frame = self.put_chinese_text(frame, f"动态手势: {chinese_dynamic}", (10, 60), color=(0, 255, 255), font_size=22)

                # Show the current pen color.
                color_text = f"Color: {self.color_index}"
                cv2.putText(frame, color_text, (10, 90), 
                          cv2.FONT_HERSHEY_SIMPLEX, 0.7, self.pen_color, 2)

                # Show thresholds and tuning hints.
                info1 = f"绘图阈值: {self.index_draw_conf_threshold:.2f} ([/])"
                frame = self.put_chinese_text(frame, info1, (10, 120), color=(0, 255, 0), font_size=20)
                info2 = f"检测阈值: {self.detection_conf_threshold:.2f} (-/=)"
                frame = self.put_chinese_text(frame, info2, (10, 150), color=(0, 255, 0), font_size=20)

                # Show the low-light enhancement status.
                if is_enhanced:
                    enhance_text = "低光增强: ON"
                    frame = self.put_chinese_text(frame, enhance_text, (10, 180), color=(255, 255, 0), font_size=20)

                # Show shortcut hints (bottom-right corner).
                h, w = frame.shape[:2]
                help_text = [
                    "快捷键:",
                    "Q-退出 C-清空画布 R-重置颜色",
                    "[/]-调绘图阈值 -/=-调检测阈值"
                ]
                y_start = h - 80
                for i, text in enumerate(help_text):
                    y_pos = y_start + i * 25
                    frame = self.put_chinese_text(frame, text, (w - 380, y_pos), color=(200, 200, 200), font_size=18)

                # Blend the camera frame with the drawing canvas.
                combined = cv2.addWeighted(frame, 0.7, self.canvas, 0.3, 0)

                # Display the result.
                cv2.imshow('Hand Gesture Recognition & Virtual Drawing', combined)

                # Key handling.
                key = cv2.waitKey(1) & 0xFF
                if key == ord('q'):
                    break
                elif key == ord('c'):  # clear the canvas
                    self.canvas = np.zeros_like(self.canvas)
                elif key == ord('r'):  # reset the pen color
                    self.color_index = 0
                    self.pen_color = self.colors[self.color_index]
                elif key == ord('['):  # lower the index-draw threshold
                    self.index_draw_conf_threshold = max(0.2, self.index_draw_conf_threshold - 0.05)
                elif key == ord(']'):  # raise the index-draw threshold
                    self.index_draw_conf_threshold = min(0.9, self.index_draw_conf_threshold + 0.05)
                elif key == ord('-'):  # lower the detection threshold
                    self.detection_conf_threshold = max(0.1, self.detection_conf_threshold - 0.05)
                elif key == ord('='):  # raise the detection threshold
                    self.detection_conf_threshold = min(0.8, self.detection_conf_threshold + 0.05)
        finally:
            # Always release resources, even if the loop raised.
            self.cap.release()
            cv2.destroyAllWindows()

def main():
    """Program entry point: print the banner, build the system, run it."""
    banner = "=" * 60
    print(banner)
    print("基于hand_YOLOv8、TensorFlow与OpenCV的实时手势识别与虚拟绘图系统")
    print(banner)

    # Construct the recognition system and enter its main loop.
    gesture_system = HandGestureRecognitionSystem()
    gesture_system.run()

# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()