import numpy as np
import cv2
import mediapipe as mp
import backend.ML.inference as inference


def test():
    """Run a live webcam hand-tracking loop and print gesture predictions.

    Captures frames from the default camera (device 0), detects hand
    landmarks with MediaPipe Hands, draws the landmark skeleton on the
    preview frame, feeds the 21 normalized ``[x, y, z]`` landmark
    coordinates to ``backend.ML.inference.predict``, and prints the result.

    Runs until the user presses 'q' or the camera stops delivering frames.

    Side effects: opens the camera and an OpenCV window; prints to stdout.
    Returns: None.
    """
    # Initialize the MediaPipe hand-detection module.
    mp_hands = mp.solutions.hands
    hands = mp_hands.Hands()
    # Drawing utility for overlaying the landmark skeleton.
    mp_drawing = mp.solutions.drawing_utils
    # Open the default camera.
    cap = cv2.VideoCapture(0)
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                # Transient frame-grab failure; try the next frame.
                continue
            # MediaPipe expects RGB input; OpenCV delivers BGR.
            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # Run hand-landmark detection on the frame.
            results = hands.process(rgb_frame)
            if results.multi_hand_landmarks:
                for landmarks in results.multi_hand_landmarks:
                    # Draw the detected hand skeleton on the preview frame.
                    mp_drawing.draw_landmarks(
                        frame, landmarks, mp_hands.HAND_CONNECTIONS)
                    # 21 landmarks, each as normalized [x, y, z] coordinates.
                    gesture = [[lm.x, lm.y, lm.z]
                               for lm in landmarks.landmark]
                    res = inference.predict(gesture)
                    print("result is :", res)
            # Show the annotated frame.
            cv2.imshow('Hand Tracking', frame)
            # Press 'q' to exit the loop.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Release resources even if an exception escapes the loop.
        hands.close()
        cap.release()
        cv2.destroyAllWindows()


# Guard the entry point so importing this module does not open the webcam.
if __name__ == "__main__":
    test()
