
import math

import cv2
import mediapipe as mp
from ultralytics import YOLO

# Path to the custom-trained YOLO gesture-detection weights.
YOLO_MODEL_PATH = "GesTure.pt"
# Normalized wrist-to-fingertip distance above which a finger counts as extended.
GESTURE_BASE_DISTANCE = 0.3

# MediaPipe Hands configuration.
HAND_MAX_NUM = 2
HAND_MIN_DETECTION_CONFIDENCE = 0.5
HAND_MIN_TRACKING_CONFIDENCE = 0.5

# MediaPipe FaceMesh configuration.
FACE_MAX_NUM = 1
FACE_MIN_DETECTION_CONFIDENCE = 0.5
FACE_MIN_TRACKING_CONFIDENCE = 0.5

# On-screen text settings for the gesture label (BGR color).
TEXT_FONT = cv2.FONT_HERSHEY_SIMPLEX
TEXT_SCALE = 1
TEXT_COLOR = (0, 0, 255)
TEXT_THICKNESS = 2
TEXT_POSITION = (50, 50)

# Key that terminates the capture loop.
EXIT_KEY = ord('q')

# Landmark/connection drawing styles for hands and face (BGR colors).
HAND_DRAWING_SPEC_1 = mp.solutions.drawing_utils.DrawingSpec(color=(255, 0, 0), thickness=2, circle_radius=2)
HAND_DRAWING_SPEC_2 = mp.solutions.drawing_utils.DrawingSpec(color=(0, 255, 0), thickness=2)
FACE_DRAWING_SPEC_1 = mp.solutions.drawing_utils.DrawingSpec(color=(0, 255, 0), thickness=1, circle_radius=1)
FACE_DRAWING_SPEC_2 = mp.solutions.drawing_utils.DrawingSpec(color=(0, 255, 255), thickness=1)

def load_yolo_model(model_path):
    """Load a YOLO model from *model_path*; log and re-raise on failure."""
    try:
        model = YOLO(model_path)
    except Exception as e:
        print(f"Failed to load YOLO model: {e}")
        raise
    return model

class CameraCapture:
    """Context manager that owns a ``cv2.VideoCapture`` and guarantees release.

    Yields the raw capture object on ``__enter__`` so callers can use
    ``cap.read()`` directly.
    """

    def __init__(self, camera_index=0):
        """Open the camera at *camera_index* (default 0, the first device).

        Raises:
            RuntimeError: if the camera cannot be opened.  RuntimeError is
                more specific than the bare Exception raised previously and
                is still caught by callers handling ``Exception``.
        """
        self.cap = cv2.VideoCapture(camera_index)
        if not self.cap.isOpened():
            raise RuntimeError("Camera could not be opened.")

    def __enter__(self):
        # Hand back the underlying VideoCapture, not self.
        return self.cap

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Release the device even when the body raised.
        if self.cap.isOpened():
            self.cap.release()

def distance(m, n):
    """Return the 2D Euclidean distance between points *m* and *n*.

    Both arguments only need ``.x`` and ``.y`` attributes (e.g. MediaPipe
    landmarks).  ``math.hypot`` is used instead of the hand-rolled
    ``sqrt(dx**2 + dy**2)`` — it is equivalent but avoids intermediate
    overflow/underflow for extreme coordinates.
    """
    return math.hypot(n.x - m.x, n.y - m.y)

def detect_gesture(handLms):
    """Classify a MediaPipe hand as "Rock", "Paper", or "Scissor".

    A fingertip (index 8/12/16/20) counts as extended when its distance
    from the wrist (landmark 0) reaches GESTURE_BASE_DISTANCE.  Any finger
    combination not matching one of the three poses falls back to "One".
    """
    wrist = handLms.landmark[0]
    index_up, middle_up, ring_up, pinky_up = (
        distance(wrist, handLms.landmark[tip]) >= GESTURE_BASE_DISTANCE
        for tip in (8, 12, 16, 20)
    )

    if index_up and middle_up and not ring_up and not pinky_up:
        return "Scissor"
    if index_up and middle_up and ring_up and pinky_up:
        return "Paper"
    if not (index_up or middle_up or ring_up or pinky_up):
        return "Rock"
    return "One"

def face_mesh_detection(image, face_mesh, mp_drawing, mp_face_mesh):
    """Run FaceMesh on *image* and draw the detected contours onto it.

    Fix: MediaPipe FaceMesh expects RGB input, but *image* is an OpenCV
    BGR frame (it comes from the capture loop in main()).  A converted
    copy is fed to the model while the landmarks are drawn on the
    original BGR image, so the displayed colors are unchanged.

    Returns the (possibly annotated) *image*.
    """
    results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    if results.multi_face_landmarks:
        for face_landmarks in results.multi_face_landmarks:
            mp_drawing.draw_landmarks(
                image, face_landmarks, mp_face_mesh.FACEMESH_CONTOURS,
                FACE_DRAWING_SPEC_1,
                FACE_DRAWING_SPEC_2
            )
    return image

def main():
    """Capture webcam frames and, per frame, run YOLO object detection,
    MediaPipe hand-gesture classification, and face-mesh drawing until the
    exit key is pressed.
    """
    try:
        model = load_yolo_model(YOLO_MODEL_PATH)
        # Single drawing-utils handle (previously bound twice as myDraw
        # and mp_drawing).
        mp_drawing = mp.solutions.drawing_utils
        mpHands = mp.solutions.hands
        hands = mpHands.Hands(
            static_image_mode=False,
            max_num_hands=HAND_MAX_NUM,
            min_detection_confidence=HAND_MIN_DETECTION_CONFIDENCE,
            min_tracking_confidence=HAND_MIN_TRACKING_CONFIDENCE
        )
        mp_face_mesh = mp.solutions.face_mesh
        face_mesh = mp_face_mesh.FaceMesh(
            static_image_mode=False,
            max_num_faces=FACE_MAX_NUM,
            min_detection_confidence=FACE_MIN_DETECTION_CONFIDENCE,
            min_tracking_confidence=FACE_MIN_TRACKING_CONFIDENCE
        )

        with CameraCapture() as cap:
            while True:
                success, frame = cap.read()
                if not success:
                    print("Failed to read frame from camera.")
                    break

                # NOTE(review): device=0 pins inference to the first CUDA
                # GPU and will fail on CPU-only machines — confirm, or drop
                # the argument to let ultralytics choose.
                results = model.predict(source=frame, device=0)
                annotated_frame = results[0].plot(line_width=2)

                # Overlay "<class> <confidence>" above each detection box.
                for result in results:
                    for box in result.boxes:
                        cls = int(box.cls[0])
                        conf = float(box.conf[0])
                        x1, y1, _x2, _y2 = map(int, box.xyxy[0])
                        label = f"{result.names[cls]} {conf:.2f}"
                        cv2.putText(annotated_frame, label, (x1, y1 - 10), TEXT_FONT, 0.5, TEXT_COLOR, 1)

                # Fix: MediaPipe Hands expects RGB input; OpenCV frames
                # are BGR, so convert before inference.
                rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                results_hands = hands.process(rgb_frame)
                if results_hands.multi_hand_landmarks:
                    for handLms in results_hands.multi_hand_landmarks:
                        gesture = detect_gesture(handLms)
                        cv2.putText(annotated_frame, gesture, TEXT_POSITION, TEXT_FONT, TEXT_SCALE, TEXT_COLOR, TEXT_THICKNESS)
                        mp_drawing.draw_landmarks(
                            annotated_frame, handLms, mpHands.HAND_CONNECTIONS,
                            HAND_DRAWING_SPEC_1,
                            HAND_DRAWING_SPEC_2
                        )

                annotated_frame = face_mesh_detection(annotated_frame, face_mesh, mp_drawing, mp_face_mesh)
                cv2.imshow('Combined Detection', annotated_frame)

                # Exit the loop when the configured key is pressed.
                if cv2.waitKey(1) & 0xFF == EXIT_KEY:
                    break

    except Exception as e:
        import traceback
        print(f"An error occurred: {e}")
        traceback.print_exc()
    finally:
        cv2.destroyAllWindows()
        # hands/face_mesh may not exist if setup raised before binding them.
        if 'hands' in locals():
            hands.close()
        if 'face_mesh' in locals():
            face_mesh.close()

# Script entry point: run the combined detection loop when executed directly.
if __name__ == "__main__":
    main()