#!/usr/bin/env python3
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from unitree_api.msg import Request
import cv2
import mediapipe as mp
import json

class GestureControlNode(Node):
    """Map hand gestures seen on a camera topic to Unitree sport-mode requests.

    Subscribes to an image topic, runs MediaPipe single-hand tracking on each
    frame, debounces the recognized gesture over several consecutive frames,
    and periodically publishes the sport API request for the current gesture.
    """

    # Gesture name -> Unitree sport API id.
    # Unknown gestures fall back to BALANCESTAND (see publish_request).
    GESTURE_API_IDS = {
        "BALANCESTAND": 1004,  # balanced stand
        "SIT": 1009,           # sit down
        "HELLO": 1016,         # wave hello
        "STRETCH": 1017,       # stretch
        "SCRATCH": 1029,       # scratch
    }

    def __init__(self):
        super().__init__('gesture_control_node')

        # Configurable input topic and request publish rate (Hz).
        self.declare_parameter('image_topic', '/camera/image_raw')
        self.declare_parameter('publish_rate', 10.0)

        # MediaPipe hand tracker: video mode, at most one hand.
        self.mp_hands = mp.solutions.hands.Hands(
            static_image_mode=False,
            max_num_hands=1,
            min_detection_confidence=0.7,
            min_tracking_confidence=0.5
        )
        self.mp_drawing = mp.solutions.drawing_utils

        # Debouncing state: a gesture becomes current only after it has been
        # seen in `history_size` consecutive frames.
        self.current_gesture = "BALANCESTAND"  # default: balanced stand
        self.gesture_history = []
        self.history_size = 5  # frames of agreement required to switch

        # ROS interfaces: image in, sport request out.
        self.bridge = CvBridge()
        self.request_pub = self.create_publisher(Request, "/api/sport/request", 10)
        self.image_sub = self.create_subscription(
            Image,
            self.get_parameter('image_topic').value,
            self.image_callback,
            10
        )

        # Periodically (re-)publish the request for the current gesture.
        self.timer = self.create_timer(
            1.0 / self.get_parameter('publish_rate').value,
            self.publish_request
        )

        self.get_logger().info("手势控制节点启动")

    def classify_gesture(self, landmarks):
        """Classify one hand's MediaPipe landmarks into an action name.

        Args:
            landmarks: A MediaPipe NormalizedLandmarkList for a single hand
                (21 points; indices follow the MediaPipe hand model).

        Returns:
            One of "HELLO", "STRETCH", "SCRATCH", "SIT", or the fallback
            "BALANCESTAND".
        """
        thumb_tip = landmarks.landmark[4]
        index_tip = landmarks.landmark[8]
        middle_tip = landmarks.landmark[12]
        ring_tip = landmarks.landmark[16]
        pinky_tip = landmarks.landmark[20]
        wrist = landmarks.landmark[0]

        # A finger counts as extended when its tip is above its second joint.
        # Image y grows downward, so "above" means a smaller y value.
        fingers = [
            index_tip.y < landmarks.landmark[7].y,   # index finger
            middle_tip.y < landmarks.landmark[11].y, # middle finger
            ring_tip.y < landmarks.landmark[15].y,   # ring finger
            pinky_tip.y < landmarks.landmark[19].y   # pinky finger
        ]

        # Gesture -> action mapping.
        # NOTE(review): the thumb x-comparisons depend on handedness and
        # camera mirroring — confirm against the deployed camera setup.
        if thumb_tip.x < wrist.x and all(fingers):  # open hand
            return "HELLO"
        elif thumb_tip.y < wrist.y and not any(fingers):  # fist, thumb raised
            return "STRETCH"
        elif thumb_tip.x > index_tip.x and fingers[0] and not any(fingers[1:]):  # index only
            return "SCRATCH"
        elif thumb_tip.y > wrist.y:  # thumb below wrist
            return "SIT"
        else:  # anything else: default to balanced stand
            return "BALANCESTAND"

    def image_callback(self, msg):
        """Run gesture recognition on one camera frame and update state."""
        try:
            cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
        except Exception as e:
            self.get_logger().error(f"Image conversion error: {e}")
            return

        # MediaPipe expects RGB input.
        rgb_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
        results = self.mp_hands.process(rgb_image)

        if results.multi_hand_landmarks:
            for landmarks in results.multi_hand_landmarks:
                gesture = self.classify_gesture(landmarks)
                self.gesture_history.append(gesture)

                # Keep only the most recent `history_size` observations.
                # NOTE(review): the history is not cleared when no hand is
                # visible, so stale frames can delay the next switch.
                if len(self.gesture_history) > self.history_size:
                    self.gesture_history.pop(0)

                # Switch only after `history_size` consecutive identical
                # detections that differ from the current gesture.
                if (len(self.gesture_history) == self.history_size and
                    all(g == gesture for g in self.gesture_history) and
                    gesture != self.current_gesture):

                    self.current_gesture = gesture
                    self.get_logger().info(f"Gesture changed to: {gesture}")

                # Visualization overlay: skeleton plus the detected gesture.
                self.mp_drawing.draw_landmarks(
                    cv_image, landmarks, mp.solutions.hands.HAND_CONNECTIONS)

                cv2.putText(cv_image, f"Gesture: {gesture}", (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

        cv2.imshow("Gesture Control", cv_image)
        cv2.waitKey(1)

    def publish_request(self):
        """Publish the sport request matching the current (debounced) gesture.

        Called by the node timer at `publish_rate` Hz. Unrecognized gestures
        map to the BALANCESTAND api id.
        """
        request = Request()
        request.header.identity.api_id = self.GESTURE_API_IDS.get(
            self.current_gesture, self.GESTURE_API_IDS["BALANCESTAND"])
        # None of the mapped actions take parameters; always send an empty
        # JSON object so the field is never left unset.
        request.parameter = json.dumps({})
        self.request_pub.publish(request)

def main(args=None):
    """Entry point: initialize ROS, spin the gesture node, clean up on exit."""
    rclpy.init(args=args)
    gesture_node = GestureControlNode()
    try:
        rclpy.spin(gesture_node)
    except KeyboardInterrupt:
        # Ctrl-C is the expected way to stop the node; not an error.
        pass
    finally:
        # Tear down in reverse order of setup, then close any OpenCV windows.
        gesture_node.destroy_node()
        rclpy.shutdown()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    main()