import mediapipe as mp
import numpy as np
import cv2
from pathlib import Path
from utils.points_operation import part_all_ratio, process_points
from utils.dir_operation import change_folder_permissions, clear_folder, \
zip_folder, get_unique_filename
from utils.train_val_split import ratio_split
from utils.load_model import load_pth
import time
class FaceMeshDetector:
    """Detect face landmarks with Mediapipe FaceMesh and group them by facial feature.

    Wraps ``mp.solutions.face_mesh.FaceMesh`` and exposes :meth:`findMeshInFace`,
    which returns pixel-space landmark coordinates bucketed into named feature
    groups (eyes, irises, nose, mouth, eyebrows, plus the full mesh).
    """

    def __init__(self, static_image_mode=False, max_num_faces=1, refine_landmarks=False, min_detection_con=0.5,
                 min_tracking_con=0.5):
        # Initialize the parameters for face mesh detection
        self.static_image_mode = static_image_mode  # Whether to process images (True) or video stream (False)
        self.max_num_faces = max_num_faces  # Maximum number of faces to detect
        self.refine_landmarks = refine_landmarks  # Whether to refine iris landmarks for better precision
        self.min_detection_con = min_detection_con  # Minimum confidence for face detection
        self.min_tracking_con = min_tracking_con  # Minimum confidence for tracking

        # Initialize Mediapipe FaceMesh solution
        self.mpFaceMesh = mp.solutions.face_mesh
        self.faceMesh = self.mpFaceMesh.FaceMesh(self.static_image_mode,
                                                 self.max_num_faces,
                                                 self.refine_landmarks,
                                                 self.min_detection_con,
                                                 self.min_tracking_con)

        # Predefined Mediapipe landmark indices for each facial feature.
        # Kept as public list attributes for backward compatibility with callers.

        self.LEFT_EYE_LANDMARKS = [398, 384, 385, 386, 387, 388, 466, 263, 249, 390, 373, 374,
                                   380, 381, 382, 362]  # Left eye landmarks

        self.RIGHT_EYE_LANDMARKS = [33, 246, 161, 160, 159, 158, 157, 173, 133, 155, 154, 153, 145,
                                    144, 163, 7]  # Right eye landmarks

        self.LEFT_IRIS_LANDMARKS = [474, 475, 477, 476]  # Left iris landmarks
        self.RIGHT_IRIS_LANDMARKS = [469, 470, 471, 472]  # Right iris landmarks

        self.NOSE_LANDMARKS = [193, 168, 417, 122, 351, 196, 419, 3, 248, 236, 456, 198, 420, 131, 360, 49, 279, 48,
                               278, 219, 439, 59, 289, 218, 438, 237, 457, 44, 19, 274]  # Nose landmarks

        self.MOUTH_LANDMARKS = [0, 267, 269, 270, 409, 306, 375, 321, 405, 314, 17, 84, 181, 91, 146, 61, 185, 40, 39,
                                37]  # Mouth landmarks
        self.LEFT_EYEBROW_LANDMARKS = [336, 296, 334, 293, 300, 276, 283, 282, 295, 285]
        self.RIGHT_EYEBROW_LANDMARKS = [70, 63, 105, 66, 107, 55, 65, 52, 53, 46]

        # Output-key -> index-set mapping used by findMeshInFace(). Sets give
        # O(1) membership tests instead of O(n) list scans inside the
        # per-landmark loop (~478 landmarks per frame with refine_landmarks).
        # Insertion order matches the original dict-key order.
        self._feature_indices = {
            "left_eye_landmarks": set(self.LEFT_EYE_LANDMARKS),
            "right_eye_landmarks": set(self.RIGHT_EYE_LANDMARKS),
            "left_iris_landmarks": set(self.LEFT_IRIS_LANDMARKS),
            "right_iris_landmarks": set(self.RIGHT_IRIS_LANDMARKS),
            "nose_landmarks": set(self.NOSE_LANDMARKS),
            "mouth_landmarks": set(self.MOUTH_LANDMARKS),
            "left_eyebrow_landmarks": set(self.LEFT_EYEBROW_LANDMARKS),
            "right_eyebrow_landmarks": set(self.RIGHT_EYEBROW_LANDMARKS),
        }

    def findMeshInFace(self, img):
        """Run FaceMesh on a BGR image and return (img, landmarks).

        ``landmarks`` maps feature names to lists of (x, y, z) tuples where
        x/y are pixel coordinates and z is Mediapipe's normalized depth.
        The dict is empty when no face is detected. With multiple detected
        faces only the last one's landmarks are kept (max_num_faces defaults
        to 1, so this is normally a single face).
        """
        landmarks = {}

        # Mediapipe expects RGB input; OpenCV delivers BGR.
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        results = self.faceMesh.process(imgRGB)

        if results.multi_face_landmarks:
            # Image dimensions are loop-invariant: hoist out of the landmark loop.
            h, w = img.shape[:2]
            for faceLms in results.multi_face_landmarks:
                # Fresh buckets for this face (same key order as before, with
                # "all_landmarks" holding the complete mesh).
                landmarks = {key: [] for key in self._feature_indices}
                landmarks["all_landmarks"] = []

                for i, lm in enumerate(faceLms.landmark):
                    # Convert normalized coordinates to pixel values; z stays normalized.
                    point = (int(lm.x * w), int(lm.y * h), lm.z)
                    landmarks["all_landmarks"].append(point)

                    # Bucket the point into every feature group containing index i.
                    for key, indices in self._feature_indices.items():
                        if i in indices:
                            landmarks[key].append(point)

        # Return the (unmodified) image and the dictionary of feature landmarks
        return img, landmarks


# Initialize the FaceMeshDetector with refined iris landmarks for better precision
detector = FaceMeshDetector(refine_landmarks=True)

# Names of the landmark groups produced by FaceMeshDetector.findMeshInFace()
face_parts = ["left_eye_landmarks", "right_eye_landmarks", "nose_landmarks",
              "mouth_landmarks", "all_landmarks", "left_iris_landmarks",
              "right_iris_landmarks", "left_eyebrow_landmarks", "right_eyebrow_landmarks"]

# Indices into face_parts selecting the features to draw and record:
# 0/1 = eyes, 3 = mouth, 7/8 = eyebrows
face_part = [0, 1, 3, 7, 8]
display_width = 640   # preview window width in pixels
display_height = 480  # preview window height in pixels
cv2.namedWindow('Face Landmark Detection', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Face Landmark Detection', display_width, display_height)
# Capture live video from the default webcam (device index 0)
cap = cv2.VideoCapture(0) # 0 for webcam

# Start a loop to process the video frame by frame
def start_record(root, category, total_num):
    """Capture webcam frames and record landmark data for one category.

    Shows a live preview with the selected facial features drawn, and counts
    a frame as captured whenever all selected feature groups were detected.
    Stops after ``total_num`` captured frames, when the camera stops
    delivering frames, or when the user presses 'q'.

    Args:
        root: Output root directory (e.g. "data").
        category: Subfolder name for this expression category.
        total_num: Number of frames with detected landmarks to collect.
    """
    # parents=True also creates `root` if missing (no separate mkdir needed).
    p = Path(root) / category
    p.mkdir(parents=True, exist_ok=True)
    num_img = 0
    while True:
        # Read the next frame from the video capture
        success, image = cap.read()
        # Check BEFORE touching the frame: on a failed read `image` is None
        # and cv2.flip / detection would raise a TypeError.
        if not success:
            break
        image = cv2.flip(image, 1)  # mirror for a natural selfie view
        # Use the FaceMeshDetector to find facial landmarks in the current frame
        image, landmarks = detector.findMeshInFace(image)
        h, w = image.shape[:2]
        # Blank single-channel canvas the landmark points are rasterized onto.
        points_image = np.zeros((h, w), dtype=np.uint8)

        # Try to draw the landmarks for the selected face parts
        try:
            for part in face_part:
                landmark_part = landmarks[face_parts[part]]
                for landmark in landmark_part:
                    # Rasterize the point and draw a small green circle at it
                    process_points(points_image, landmark)
                    cv2.circle(image, (landmark[0], landmark[1]), 3, (0, 255, 0), -1)

            num_img += 1

        except KeyError:
            # No face detected this frame (landmarks dict is empty): don't count it.
            pass
        # >= guards against any overshoot; original used == which could loop
        # forever if the counter ever skipped the exact value.
        if num_img >= total_num:
            break
        # np.save(p / (category + "_" + get_unique_filename() + ".npy"), np.array(points_image))
        resized_image = cv2.resize(image, (display_width, display_height))
        cv2.imshow('Face Landmark Detection', resized_image)
        # Wait 1 ms for a key press; quit early on 'q'
        key = cv2.waitKey(1)
        if key & 0xFF == ord('q'):
            break
if __name__ == '__main__':
    # Wipe any previously recorded session before capturing a new one.
    clear_folder("data")
    # Expression categories; index 3 ("open_mouth") is the one recorded here.
    category = ["frown", "open_eye", "close_eye", "open_mouth"]
    start_record('data', category[3], 1000)
    # Split recorded data into train/val sets — presumably 0.8 is the train
    # fraction; confirm against utils.train_val_split.ratio_split.
    ratio_split("data", "points", 0.8)
