Jassk28 committed on
Commit c1cc099
1 Parent(s): 1fb93f2

Upload face_cropper.py

Files changed (1)
  1. face_cropper.py +103 -0
face_cropper.py ADDED
@@ -0,0 +1,103 @@
+import cv2
+import mediapipe as mp
+import os
+from gradio_client import Client
+# from test_image_fusion import Test
+# from test_image_fusion import Test
+from test_image import Test
+import numpy as np
+
+
+
+from PIL import Image
+import numpy as np
+import cv2
+
+# client = Client("https://tbvl-real-and-fake-face-detection.hf.space/--replicas/40d41jxhhx/")
+
+data = 'faceswap'
+dct = 'fft'
+
+
+# testet = Test(model_paths = [f"weights/{data}-hh-best_model.pth",
+#                              f"weights/{data}-fft-best_model.pth"],
+#               multi_modal = ['hh', 'fft'])
+
+testet = Test(model_path=f"weights/{data}-hh-best_model.pth",
+              multi_modal='hh')
+
+# Initialize MediaPipe Face Detection
+mp_face_detection = mp.solutions.face_detection
+mp_drawing = mp.solutions.drawing_utils
+face_detection = mp_face_detection.FaceDetection(model_selection=1, min_detection_confidence=0.35)
+
+# Create a directory to save the cropped face images if it does not exist
+save_dir = "cropped_faces"
+os.makedirs(save_dir, exist_ok=True)
+
+# def detect_and_label_faces(image_path):
+
+
+# Function to crop faces from a video and save them as images
+# def crop_faces_from_video(video_path):
+#     # Read the video
+#     cap = cv2.VideoCapture(video_path)
+#     frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+#     frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+#     fps = int(cap.get(cv2.CAP_PROP_FPS))
+#     total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+
+#     # Define the codec and create VideoWriter object
+#     out = cv2.VideoWriter(f'output_{real}_{data}_fusion.avi', cv2.VideoWriter_fourcc('M','J','P','G'), fps, (frame_width, frame_height))
+
+#     if not cap.isOpened():
+#         print("Error: Could not open video.")
+#         return
+# Convert PIL Image to NumPy array for OpenCV
+def pil_to_opencv(pil_image):
+    open_cv_image = np.array(pil_image)
+    # Convert RGB to BGR for OpenCV
+    open_cv_image = open_cv_image[:, :, ::-1].copy()
+    return open_cv_image
+
+# Convert OpenCV NumPy array to PIL Image
+def opencv_to_pil(opencv_image):
+    # Convert BGR to RGB
+    pil_image = Image.fromarray(opencv_image[:, :, ::-1])
+    return pil_image
+
+
+
+
+def detect_and_label_faces(frame):
+    frame = pil_to_opencv(frame)
+
+
+    print(type(frame))
+    # Convert the frame to RGB
+    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+    # Perform face detection
+    results = face_detection.process(frame_rgb)
+
+    # If faces are detected, crop and save each face as an image
+    if results.detections:
+        for face_count, detection in enumerate(results.detections):
+            bboxC = detection.location_data.relative_bounding_box
+            ih, iw, _ = frame.shape
+            x, y, w, h = int(bboxC.xmin * iw), int(bboxC.ymin * ih), int(bboxC.width * iw), int(bboxC.height * ih)
+            # Crop the face region and make sure the bounding box is within the frame dimensions
+            crop_img = frame[max(0, y):min(ih, y+h), max(0, x):min(iw, x+w)]
+            if crop_img.size > 0:
+                face_filename = os.path.join(save_dir, f'face_{face_count}.jpg')
+                cv2.imwrite(face_filename, crop_img)
+
+                label = testet.testimage(face_filename)
+
+                if os.path.exists(face_filename):
+                    os.remove(face_filename)
+
+                color = (0, 0, 255) if label == 'fake' else (0, 255, 0)
+                cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
+                cv2.putText(frame, label, (x, y + 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, color, 2)
+    return opencv_to_pil(frame)
+
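
A minimal driver sketch for the uploaded file is shown below, assuming face_cropper.py sits next to test_image.py and the weights/ directory in this repo; the script name usage_sketch.py and the image paths sample.jpg and labeled.jpg are placeholders, not part of the commit.

# usage_sketch.py -- hypothetical driver, not part of this commit
from PIL import Image

from face_cropper import detect_and_label_faces

# Load an image as a PIL RGB image; detect_and_label_faces converts it to
# OpenCV BGR internally, crops each detected face, classifies it with the
# Test model, and returns the frame with labeled bounding boxes as a PIL Image.
input_image = Image.open("sample.jpg").convert("RGB")  # placeholder path
labeled_image = detect_and_label_faces(input_image)
labeled_image.save("labeled.jpg")  # placeholder path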