|
import cv2 |
|
import gradio as gr |
|
import mediapipe as mp |
|
|
|
import imutils |
|
import numpy as np |
|
|
|
|
|
mp_drawing = mp.solutions.drawing_utils |
|
mp_drawing_styles = mp.solutions.drawing_styles |
|
mp_face_mesh = mp.solutions.face_mesh |
|
mp_face_detection = mp.solutions.face_detection |
|
|
|
|
|
def apply_media_pipe_face_detection(image):
    """Detect faces in a BGR image and draw the detections.

    Returns the input image unchanged when no face is found; otherwise
    returns a copy with MediaPipe detection boxes/keypoints drawn on it.
    """
    detector = mp_face_detection.FaceDetection(
        model_selection=1, min_detection_confidence=0.5)
    with detector as fd:
        # MediaPipe expects RGB; OpenCV images arrive as BGR.
        rgb_frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        results = fd.process(rgb_frame)
    if not results.detections:
        return image
    output = image.copy()
    for det in results.detections:
        mp_drawing.draw_detection(output, det)
    return output
|
|
|
|
|
def apply_media_pipe_facemesh(image):
    """Annotate a BGR image with the MediaPipe face mesh.

    Draws the tessellation, contours, and iris connections for each
    detected face. Returns the input image unchanged when no face is
    found; otherwise returns an annotated copy.
    """
    with mp_face_mesh.FaceMesh(
            static_image_mode=True,
            max_num_faces=1,
            refine_landmarks=True,
            min_detection_confidence=0.5) as mesh:
        # MediaPipe expects RGB; OpenCV images arrive as BGR.
        results = mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    if not results.multi_face_landmarks:
        return image
    # Each layer is (connection set, default drawing style) — drawn in
    # the same order as the original tessellation/contours/irises calls.
    layers = (
        (mp_face_mesh.FACEMESH_TESSELATION,
         mp_drawing_styles.get_default_face_mesh_tesselation_style()),
        (mp_face_mesh.FACEMESH_CONTOURS,
         mp_drawing_styles.get_default_face_mesh_contours_style()),
        (mp_face_mesh.FACEMESH_IRISES,
         mp_drawing_styles.get_default_face_mesh_iris_connections_style()),
    )
    output = image.copy()
    for landmarks in results.multi_face_landmarks:
        for connections, style in layers:
            mp_drawing.draw_landmarks(
                image=output,
                landmark_list=landmarks,
                connections=connections,
                landmark_drawing_spec=None,
                connection_drawing_spec=style)
    return output
|
|
|
class FaceProcessing(object):
    """Gradio UI wrapper around the MediaPipe face-processing helpers.

    Builds a tabbed webcam UI inside the supplied ``gr.Blocks`` container
    and wires its buttons/streams to the image-processing callbacks.
    """

    def __init__(self, ui_obj):
        # ui_obj: the gr.Blocks() container the UI is assembled into.
        self.name = "Face Image Processing"
        self.description = "Call for Face Image and video Processing"
        self.ui_obj = ui_obj

    def take_webcam_photo(self, image):
        """Pass the captured webcam frame straight through to the output."""
        return image

    def take_webcam_video(self, images):
        """Pass the recorded webcam video straight through to the output."""
        return images

    def mp_webcam_photo(self, image):
        """Return the raw webcam frame unchanged (no annotation)."""
        return image

    def mp_webcam_face_mesh(self, image):
        """Annotate the webcam frame with a MediaPipe face mesh."""
        return apply_media_pipe_facemesh(image)

    def mp_webcam_face_detection(self, image):
        """Annotate the webcam frame with MediaPipe face-detection boxes."""
        return apply_media_pipe_face_detection(image)

    def webcam_stream_update(self, video_frame):
        """Per-frame callback for the live-stream tab.

        BUG FIX: the original body referenced ``face_orientation_obj``,
        which is not defined anywhere in this file, so every streamed
        frame raised a NameError. Look the helper up defensively and
        fall back to echoing the frame when it is absent.
        """
        orientation = globals().get("face_orientation_obj")
        if orientation is None:
            # TODO(review): wire up a real face-orientation helper; until
            # then the stream output mirrors the input.
            return video_frame
        return orientation.create_orientation(video_frame)

    def create_ui(self):
        """Build the tabbed webcam UI and connect events to callbacks."""
        with self.ui_obj:
            gr.Markdown("Face Analysis with Webcam/Video")
            with gr.Tabs():
                with gr.TabItem("Playing with Webcam"):
                    with gr.Row():
                        webcam_image_in = gr.Image(label="Webcam Image Input", source="webcam")
                        webcam_video_in = gr.Video(label="Webcam Video Input", source="webcam")
                    with gr.Row():
                        webcam_photo_action = gr.Button("Take the Photo")
                        webcam_video_action = gr.Button("Take the Video")
                    with gr.Row():
                        webcam_photo_out = gr.Image(label="Webcam Photo Output")
                        webcam_video_out = gr.Video(label="Webcam Video")
                with gr.TabItem("Mediapipe Facemesh with Webcam"):
                    with gr.Row():
                        with gr.Column():
                            mp_image_in = gr.Image(label="Webcam Image Input", source="webcam")
                        with gr.Column():
                            mp_photo_action = gr.Button("Take the Photo")
                            mp_apply_fm_action = gr.Button("Apply Face Mesh the Photo")
                            mp_apply_landmarks_action = gr.Button("Apply Face Landmarks the Photo")
                    with gr.Row():
                        mp_photo_out = gr.Image(label="Webcam Photo Output")
                        mp_fm_photo_out = gr.Image(label="Face Mesh Photo Output")
                        mp_lm_photo_out = gr.Image(label="Face Landmarks Photo Output")
                with gr.TabItem("Face Orientation on Live Webcam Stream"):
                    with gr.Row():
                        webcam_stream_in = gr.Image(label="Webcam Stream Input",
                                                    source="webcam",
                                                    streaming=True)
                        webcam_stream_out = gr.Image(label="Webcam Stream Output")
                        # Streaming input re-fires on every new frame.
                        webcam_stream_in.change(
                            self.webcam_stream_update,
                            inputs=webcam_stream_in,
                            outputs=webcam_stream_out
                        )

            # Button wiring: each click maps one input image/video to one output.
            mp_photo_action.click(
                self.mp_webcam_photo,
                [mp_image_in],
                [mp_photo_out]
            )
            mp_apply_fm_action.click(
                self.mp_webcam_face_mesh,
                [mp_image_in],
                [mp_fm_photo_out]
            )
            mp_apply_landmarks_action.click(
                self.mp_webcam_face_detection,
                [mp_image_in],
                [mp_lm_photo_out]
            )
            webcam_photo_action.click(
                self.take_webcam_photo,
                [webcam_image_in],
                [webcam_photo_out]
            )
            webcam_video_action.click(
                self.take_webcam_video,
                [webcam_video_in],
                [webcam_video_out]
            )

    def launch_ui(self):
        """Start the Gradio server for the assembled UI."""
        self.ui_obj.launch()
|
|
|
|
|
if __name__ == '__main__': |
|
my_app = gr.Blocks() |
|
face_ui = FaceProcessing(my_app) |
|
face_ui.create_ui() |
|
face_ui.launch_ui() |