from collections import deque

import cv2
import numpy as np

# Prefer MediaPipe for hand tracking; if it cannot be imported (e.g. no
# wheel is published for the running Python version, such as 3.13), fall
# back to a pure-OpenCV skin-segmentation pipeline.
try:
    import mediapipe as mp  # type: ignore
except Exception:
    HAS_MEDIAPIPE = False
else:
    HAS_MEDIAPIPE = True


class HandTracker:
    """Track a single index-fingertip position in a video stream.

    Uses MediaPipe Hands when available; otherwise falls back to an HSV
    skin-segmentation pipeline built from plain OpenCV operations.
    Detected fingertip coordinates are smoothed with a moving average over
    the last ``window_size`` detections to reduce jitter.
    """

    def __init__(self) -> None:
        # Last smoothed fingertip position; stays (0, 0) until the first
        # successful detection (process_frame returns these values).
        self.smoothed_x: int = 0
        self.smoothed_y: int = 0
        # Moving-average window; bounded deques drop the oldest sample
        # automatically, replacing the manual pop(0) bookkeeping.
        self.window_size: int = 5
        self.x_history: deque[int] = deque(maxlen=self.window_size)
        self.y_history: deque[int] = deque(maxlen=self.window_size)

        if HAS_MEDIAPIPE:
            # MediaPipe Hands backend.
            self.mp_hands = mp.solutions.hands
            self.hands = self.mp_hands.Hands(
                static_image_mode=False,
                max_num_hands=1,
                min_detection_confidence=0.7,
                min_tracking_confidence=0.7
            )
            self.mp_draw = mp.solutions.drawing_utils
        else:
            # Parameters for the OpenCV skin-segmentation fallback
            # (tune as needed).
            # HSV skin-color range; suits most lighting conditions but may
            # still need adjustment for a specific environment.
            self.lower_hsv = np.array([0, 30, 60])
            self.upper_hsv = np.array([20, 170, 255])
            # Structuring element for morphological noise removal.
            self.kernel = np.ones((5, 5), np.uint8)

    def _smooth(self, x: int, y: int) -> tuple[int, int]:
        """Push (x, y) into the history and return the moving average.

        Also updates ``smoothed_x`` / ``smoothed_y`` as a side effect so
        callers can read the latest smoothed position later.
        """
        self.x_history.append(x)
        self.y_history.append(y)
        self.smoothed_x = int(np.mean(self.x_history))
        self.smoothed_y = int(np.mean(self.y_history))
        return self.smoothed_x, self.smoothed_y

    def _process_with_mediapipe(self, frame: np.ndarray) -> tuple[np.ndarray, tuple[int, int]]:
        """Detect the index fingertip with MediaPipe Hands.

        Draws the hand landmarks and the smoothed fingertip marker on
        ``frame`` in place. Returns the annotated frame and the last
        smoothed (x, y) — which is the previous position (or (0, 0)) when
        no hand is detected in this frame.
        """
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = self.hands.process(rgb_frame)
        h, w = frame.shape[:2]
        finger_x, finger_y = -1, -1

        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                # Index fingertip is landmark 8; convert normalized
                # coordinates to pixels.
                index_finger_tip = hand_landmarks.landmark[8]
                finger_x = int(index_finger_tip.x * w)
                finger_y = int(index_finger_tip.y * h)

                # Draw the hand skeleton for visual feedback.
                self.mp_draw.draw_landmarks(
                    frame, hand_landmarks, self.mp_hands.HAND_CONNECTIONS
                )

        if finger_x >= 0 and finger_y >= 0:
            sx, sy = self._smooth(finger_x, finger_y)
            cv2.circle(frame, (sx, sy), 15, (0, 255, 0), cv2.FILLED)
        return frame, (self.smoothed_x, self.smoothed_y)

    def _process_with_opencv(self, frame: np.ndarray) -> tuple[np.ndarray, tuple[int, int]]:
        """Estimate the fingertip via HSV skin segmentation (fallback).

        Segments skin-colored pixels, takes the largest contour as the
        hand, and uses the topmost convex-hull point as the fingertip
        estimate. Annotates ``frame`` in place and returns it with the
        last smoothed (x, y).
        """
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, self.lower_hsv, self.upper_hsv)
        # Denoise, then open/close to remove speckles and fill holes.
        mask = cv2.medianBlur(mask, 5)
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, self.kernel, iterations=1)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, self.kernel, iterations=2)

        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        finger_x, finger_y = -1, -1
        if contours:
            # Largest contour is assumed to be the hand region.
            cnt = max(contours, key=cv2.contourArea)
            if cv2.contourArea(cnt) > 500:  # area threshold filters noise blobs
                cv2.drawContours(frame, [cnt], -1, (255, 0, 0), 2)
                hull = cv2.convexHull(cnt, returnPoints=True)
                if hull is not None and len(hull) > 0:
                    # Topmost hull point (smallest y) approximates the
                    # fingertip for an upright hand.
                    top_point = min(hull.reshape(-1, 2), key=lambda p: p[1])
                    finger_x, finger_y = int(top_point[0]), int(top_point[1])

        if finger_x >= 0 and finger_y >= 0:
            sx, sy = self._smooth(finger_x, finger_y)
            cv2.circle(frame, (sx, sy), 15, (0, 255, 0), cv2.FILLED)
        return frame, (self.smoothed_x, self.smoothed_y)

    def process_frame(self, frame: np.ndarray) -> tuple[np.ndarray, tuple[int, int]]:
        """Process one BGR frame; return (annotated frame, smoothed (x, y))."""
        if HAS_MEDIAPIPE:
            return self._process_with_mediapipe(frame)
        else:
            return self._process_with_opencv(frame)

    def release(self) -> None:
        """Release backend resources (MediaPipe only; OpenCV path is stateless)."""
        if HAS_MEDIAPIPE:
            self.hands.close()


# Manual test: track the index fingertip from the default webcam.
if __name__ == "__main__":
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        # Fail fast instead of silently skipping the loop when no camera
        # is available.
        print("无法读取摄像头画面")
        raise SystemExit(1)

    tracker = HandTracker()
    try:
        while cap.isOpened():
            success, frame = cap.read()
            if not success:
                print("无法读取摄像头画面")
                break

            # Mirror the frame so on-screen movement matches the user's.
            frame = cv2.flip(frame, 1)
            processed_frame, (x, y) = tracker.process_frame(frame)

            # Overlay the smoothed fingertip coordinates.
            cv2.putText(
                processed_frame, f"Finger: ({x}, {y})", (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2
            )

            # Show which tracking backend is active.
            backend = "MediaPipe" if HAS_MEDIAPIPE else "OpenCV"
            cv2.putText(
                processed_frame, f"Backend: {backend}", (10, 65),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2
            )

            cv2.imshow("Hand Tracker Test", processed_frame)

            if cv2.waitKey(5) & 0xFF == 27:  # ESC exits
                break
    finally:
        # Guarantee cleanup even if processing raises mid-loop.
        tracker.release()
        cap.release()
        cv2.destroyAllWindows()
    print("手势识别模块测试完成，文件已保存为 hand_tracker.py。当前后端:", "MediaPipe" if HAS_MEDIAPIPE else "OpenCV")
