# facefinder — app.py
# "pull out individual faces" (commit 7beb285)
# NOTE: the lines above were Hugging Face Spaces page chrome scraped into the
# source; kept here as a comment so the module remains importable.
import gradio as gr
from deepface import DeepFace
import numpy as np
import PIL
from PIL import Image, ImageDraw, ImageFont
import time
# UI choices for the detector backend radio: each entry is
# [human-readable label, DeepFace `detector_backend` key], in display order.
dbackends = [
    ['Haar Cascade (OpenCV)', 'opencv'],
    ['Single Shot MultiBox Detector (OpenCV)', 'ssd'],
    ['Histogram of Oriented Gradients (Dlib)', 'dlib'],
    ['RetinaFace', 'retinaface'],
    ['You Only Look Once v8', 'yolov8'],
    ['YuNet', 'yunet'],
    # Disabled option, kept for reference:
    # ['Multi-task Cascade Convolutional Neural Network (TensorFlow) ', 'mtcnn'],
    ['Fast Multi-task Cascade Convolutional Neural Network (PyTorch)', 'fastmtcnn'],
]
with gr.Blocks() as demo:
    # UI layout: source image on top, annotated detections below, then the
    # info/crop panels that are filled in when a face annotation is clicked.
    input_image = gr.Image(value="8428_26_SM.jpg")
    annotated_image = gr.AnnotatedImage()
    selected_face_info = gr.Textbox(label="Selected Face Info", value="Click on a face above")
    selected_face_pic = gr.Image(label="Selected Face", value="Click on a face above")

    def findFaces(imgfile, dbackend):
        """Detect faces in *imgfile* with the chosen DeepFace backend.

        Parameters:
            imgfile: image as passed by the gr.Image input (numpy array or path).
            dbackend: DeepFace ``detector_backend`` key from the radio choices.

        Returns a 3-tuple matching the Interface outputs:
            (AnnotatedImage update, face count for the info textbox,
             None to clear the selected-face image).

        Side effect: rebuilds the module-global ``faceannotations`` list of
        ``[(x1, y1, x2, y2), label]`` entries that ``select_section`` reads.
        """
        start_time = time.time()
        print(start_time)
        # enforce_detection=False keeps DeepFace from raising when no face is found.
        face_objs = DeepFace.extract_faces(
            img_path=imgfile,
            enforce_detection=False,
            detector_backend=dbackend,
        )
        numberoffaces = len(face_objs)
        global faceannotations
        faceannotations = []
        for i, face_obj in enumerate(face_objs, 1):
            area = face_obj["facial_area"]  # hoist the repeated nested lookups
            face_coordinates = (
                area["x"],
                area["y"],
                area["x"] + area["w"],
                area["y"] + area["h"],
            )
            face_confidence = "Face " + str(i) + ": " + "{:.0%}".format(face_obj["confidence"])
            faceannotations.append([face_coordinates, face_confidence])
        run_time = str(round(time.time() - start_time, 2))
        results = gr.AnnotatedImage(
            label="Detected " + str(numberoffaces) + " faces via " + dbackend + ' in ' + run_time + ' seconds.',
            value=(imgfile, faceannotations),
        )
        print(run_time)
        # BUG FIX: run_time (a str) was previously routed to selected_face_pic,
        # a gr.Image output, which cannot display a time string; return None to
        # clear that image instead. Run time is still shown in the label above.
        return (results, numberoffaces, None)

    dbackendchoice = gr.Radio(choices=dbackends, label='Detector Backend:', value='retinaface')
    gr.Interface(
        allow_flagging="never",
        fn=findFaces,
        inputs=[input_image, dbackendchoice],
        outputs=[annotated_image, selected_face_info, selected_face_pic],
    )

    def select_section(evt: gr.SelectData):
        """Return (annotation, cropped face) for the annotation the user clicked.

        ``evt.index`` is the position of the clicked annotation, matching the
        order entries were appended to the global ``faceannotations``.
        """
        (x1, y1, x2, y2), _label = faceannotations[evt.index]
        # NOTE(review): this reads the *initial* component value — presumably
        # Gradio materializes it as a dict with a 'path' key; it does not track
        # an image the user uploads at runtime. TODO confirm against caller.
        full_image = np.array(Image.open(input_image.value['path']))
        cropped_image = full_image[y1:y2, x1:x2]  # numpy arrays index rows (y) first
        return faceannotations[evt.index], cropped_image

    annotated_image.select(select_section, None, [selected_face_info, selected_face_pic])

demo.launch(show_error=True)