import asyncio
import json
import time

import aiohttp_jinja2
import cv2
import jinja2
import mediapipe as mp
from aiohttp import web, WSMsgType

class HandGestureRecognition:
    """Main class of the hand-gesture control system.

    Captures frames from an ESP32-CAM MJPEG stream, runs MediaPipe hand
    tracking on each frame, and broadcasts the recognized gesture state
    as JSON to every connected WebSocket client, while also serving the
    web UI pages.
    """

    def __init__(self):
        # Initialize the MediaPipe hand-tracking solution.
        self.mp_hands = mp.solutions.hands
        self.hands = self.mp_hands.Hands(
            static_image_mode=False,        # video mode (not static images)
            max_num_hands=2,                # track both hands simultaneously
            min_detection_confidence=0.7,   # detection confidence threshold
            min_tracking_confidence=0.5     # tracking confidence threshold
        )
        self.mp_draw = mp.solutions.drawing_utils  # draws hand landmarks
        self.clients = set()                # all connected WebSocket clients
        self.cap = None                     # lazily-opened cv2.VideoCapture

    def detect_hand_type(self, hand_landmarks, handedness):
        """Return which hand was detected.

        Args:
            hand_landmarks: MediaPipe landmark list for the hand (unused
                here; kept for interface symmetry with detect_gesture).
            handedness: MediaPipe handedness classification result.

        Returns:
            'Left' or 'Right'.
        """
        return handedness.classification[0].label

    def detect_gesture(self, hand_landmarks, frame_shape, hand_type):
        """Derive a gesture-state dict from hand landmarks.

        The right hand drives cursor movement (MOVE); the left hand
        toggles between MENU and CONFIRM based on horizontal position.

        Args:
            hand_landmarks: MediaPipe landmark list (normalized 0..1 coords).
            frame_shape: shape of the current frame (unused; kept for
                interface compatibility).
            hand_type: 'Left' or 'Right' as returned by detect_hand_type.

        Returns:
            dict with 'status', 'hand_type', 'x', 'y' (percent of frame),
            plus 'scale' for the right hand.
        """
        index_tip = hand_landmarks.landmark[8]  # index fingertip
        wrist = hand_landmarks.landmark[0]

        # Rough palm size: wrist-to-fingertip distance in normalized coords.
        palm_size = ((wrist.x - index_tip.x)**2 + (wrist.y - index_tip.y)**2)**0.5

        # Fingertip position as a percentage of the frame.
        x = index_tip.x * 100
        y = index_tip.y * 100

        if hand_type == 'Right':
            return {
                'status': 'MOVE',
                'hand_type': 'Right',
                'x': x,
                'y': y,
                'scale': palm_size
            }
        # Left hand: x > 65 (hand moved to the right side) means "confirm".
        status = 'CONFIRM' if x > 65 else 'MENU'
        return {
            'status': status,
            'hand_type': 'Left',
            'x': x,
            'y': y
        }

    async def broadcast_status(self, data):
        """Broadcast *data* (JSON-serialized) to every connected client.

        Args:
            data: JSON-serializable payload to send.
        """
        if not self.clients:
            return
        message = json.dumps(data)
        # return_exceptions=True: a single dead/closed socket must not
        # abort the broadcast to the remaining clients.
        await asyncio.gather(
            *(client.send_str(message) for client in self.clients),
            return_exceptions=True
        )

    async def websocket_handler(self, request):
        """Handle one WebSocket connection for its whole lifetime.

        Registers the client for broadcasts and unregisters it when the
        connection closes (normally or with an error).

        Args:
            request: the incoming aiohttp request.
        """
        ws = web.WebSocketResponse()
        await ws.prepare(request)

        self.clients.add(ws)
        print(f"客户端已连接，当前总连接数: {len(self.clients)}")

        try:
            async for msg in ws:  # drain incoming messages until close
                if msg.type == WSMsgType.ERROR:
                    print(f"WebSocket错误: {ws.exception()}")
        finally:
            # discard() is safe even if the client was already removed.
            self.clients.discard(ws)
            print(f"客户端已断开，剩余连接数: {len(self.clients)}")

        return ws

    def get_frame(self):
        """Grab one BGR frame from the ESP32-CAM stream, or None on failure.

        Lazily (re)opens the capture with a small retry loop.  On any
        failure the capture is released and reset to None so the NEXT
        call reconnects.  (The original `hasattr(self, 'cap')` guard never
        reconnected once self.cap had been set to None.)

        NOTE(review): time.sleep() blocks the asyncio event loop while
        reconnecting; acceptable here since nothing can proceed without
        video, but worth confirming.
        """
        try:
            # (Re)connect when we never connected or a previous failure
            # reset the capture to None.
            if getattr(self, 'cap', None) is None:
                max_retries = 3
                for attempt in range(max_retries):
                    print(f"尝试连接ESP32-CAM (尝试 {attempt + 1}/{max_retries})...")

                    # ESP32-CAM stream URL -- change the IP address here.
                    url = "http://192.168.1.3:81/stream"
                    self.cap = cv2.VideoCapture(url)

                    if self.cap.isOpened():
                        print("ESP32-CAM 连接成功！")
                        break
                    print("连接失败，等待重试...")
                    time.sleep(2)  # wait 2 seconds before retrying

                if not self.cap.isOpened():
                    print("无法连接到ESP32-CAM，请检查：")
                    print("1. ESP32-CAM 的IP地址是否正确")
                    print("2. ESP32-CAM 是否已启动")
                    print("3. 电脑和ESP32-CAM是否在同一网络")
                    print("4. 是否可以通过浏览器访问视频流")
                    # Reset so the next call retries the connection instead
                    # of reading from a dead capture.
                    self.cap.release()
                    self.cap = None
                    return None

            ret, frame = self.cap.read()
            if not ret:
                print("读取视频帧失败")
                self.cap.release()
                self.cap = None  # force a reconnect on the next call
                return None

            return frame

        except Exception as e:
            print(f"获取视频帧时发生错误: {str(e)}")
            # Guard: self.cap may be None here, and None.release() would
            # raise a new AttributeError out of this handler.
            if getattr(self, 'cap', None) is not None:
                self.cap.release()
            self.cap = None
            return None

    # Web page route handlers
    @aiohttp_jinja2.template('index.html')
    async def serve_index(self, request):
        """Render the main page."""
        return {}

    @aiohttp_jinja2.template('3d-demo.html')
    async def serve_3d_demo(self, request):
        """Render the 3D demo page."""
        return {}

    async def run(self):
        """Start the web server and run the capture/recognition main loop."""
        # Web application and routes.
        app = web.Application()
        app.router.add_get('/', self.serve_index)
        app.router.add_get('/3d-demo', self.serve_3d_demo)
        app.router.add_get('/ws', self.websocket_handler)

        # Jinja2 templates are loaded from ./templates.
        aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader('templates'))

        # Start the web server.
        runner = web.AppRunner(app)
        await runner.setup()
        site = web.TCPSite(runner, 'localhost', 8080)
        await site.start()

        print("服务器已启动:")
        print("- Web界面: http://localhost:8080")
        print("- WebSocket: ws://localhost:8080/ws")

        try:
            # Main loop: grab frame -> annotate -> recognize -> broadcast.
            while True:
                frame = self.get_frame()
                if frame is None:
                    print("无法获取摄像头画面，重试中...")
                    await asyncio.sleep(0.1)
                    continue

                # Mirror the image so movements feel natural to the user.
                frame = cv2.flip(frame, 1)

                height, width = frame.shape[:2]

                # Draw the 3x3 grid overlay: vertical then horizontal lines.
                for i in range(1, 3):
                    x = int(width * i / 3)
                    cv2.line(frame, (x, 0), (x, height), (0, 255, 0), 1)
                for i in range(1, 3):
                    y = int(height * i / 3)
                    cv2.line(frame, (0, y), (width, y), (0, 255, 0), 1)

                # Number the grid cells 1-9.
                font = cv2.FONT_HERSHEY_SIMPLEX
                cell_width = width // 3
                cell_height = height // 3
                for i in range(3):
                    for j in range(3):
                        number = i * 3 + j + 1
                        x = j * cell_width + cell_width // 2 - 10
                        y = i * cell_height + cell_height // 2 + 10
                        cv2.putText(frame, str(number), (x, y), font,
                                  1, (0, 255, 0), 2)

                # MediaPipe expects RGB input; OpenCV frames are BGR.
                rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                results = self.hands.process(rgb_frame)

                if results.multi_hand_landmarks and results.multi_handedness:
                    # Landmarks and handedness results are index-aligned.
                    for hand_landmarks, handedness in zip(results.multi_hand_landmarks,
                                                          results.multi_handedness):
                        hand_type = self.detect_hand_type(hand_landmarks, handedness)

                        # Label the hand type near the wrist.
                        wrist_point = hand_landmarks.landmark[0]
                        h, w, _ = frame.shape
                        cx, cy = int(wrist_point.x * w), int(wrist_point.y * h)
                        cv2.putText(frame, hand_type, (cx-20, cy-20),
                                  cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

                        # Grid cell (1-9) containing the wrist.  min(..., 2)
                        # clamps the x == 1.0 / y == 1.0 edge case, which
                        # would otherwise index a nonexistent 4th row/column.
                        region_x = min(int(wrist_point.x * 3), 2) + 1
                        region_y = min(int(wrist_point.y * 3), 2) + 1
                        region_number = region_y * 3 - (3 - region_x)

                        # Show the region number below the wrist.
                        cv2.putText(frame, f"Region: {region_number}",
                                  (cx-20, cy+30), font, 0.7, (0, 255, 0), 2)

                        # Draw the hand skeleton.
                        self.mp_draw.draw_landmarks(
                            frame,
                            hand_landmarks,
                            self.mp_hands.HAND_CONNECTIONS
                        )

                        # Recognize the gesture and push it to all clients.
                        data = self.detect_gesture(hand_landmarks, frame.shape, hand_type)
                        if data['status'] != 'IGNORE':
                            await self.broadcast_status(data)

                # Show the annotated preview window; 'q' quits.
                cv2.imshow("Hand Gesture Control", frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

                # Brief yield to cap the frame rate and let the event
                # loop service WebSocket traffic.
                await asyncio.sleep(0.01)

        finally:
            # Clean up UI, MediaPipe, camera, and web-server resources.
            cv2.destroyAllWindows()
            self.hands.close()
            if getattr(self, 'cap', None) is not None:
                self.cap.release()
            await runner.cleanup()

def _main() -> None:
    """Start the gesture-control system and block until it exits."""
    print("启动手势控制系统...")
    asyncio.run(HandGestureRecognition().run())


# Program entry point.
if __name__ == "__main__":
    _main()