# facefinder / app.py
import gradio as gr
from deepface import DeepFace
import time
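# (display label, DeepFace backend name) pairs for the detector choices below.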
dbackends = [
    ['Haar Cascade (OpenCV)', 'opencv'],
    ['Single Shot MultiBox Detector (OpenCV)', 'ssd'],
    ['Histogram of Oriented Gradients (Dlib)', 'dlib'],
    ['RetinaFace', 'retinaface'],
    ['You Only Look Once v8', 'yolov8'],
    ['YuNet', 'yunet'],
    # ['Multi-task Cascade Convolutional Neural Network (TensorFlow)', 'mtcnn'],
    ['Fast Multi-task Cascade Convolutional Neural Network (PyTorch)', 'fastmtcnn'],
]
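# The UI: an annotated image whose face labels are clickable, plus a textbox
# that echoes whichever face the user selects.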
with gr.Blocks() as demo:
    annotated_image = gr.AnnotatedImage()
    # jsontext = gr.Text(label="deepface extract_faces results")
    selected_face = gr.Textbox(label="Selected Face", value="Click on a face above")
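    # Run the chosen detector over the image and build the (bounding box, label)
    # annotations that gr.AnnotatedImage renders.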
    def findFaces(imgfile, dbackend):
        start_time = time.time()
        face_objs = DeepFace.extract_faces(
            img_path=imgfile, enforce_detection=False, detector_backend=dbackend
        )
        numberoffaces = len(face_objs)
        # Shared with select_section below so clicks can look up their annotation.
        global faceannotations
        faceannotations = []
        for i, face_obj in enumerate(face_objs, 1):
            area = face_obj["facial_area"]
            # AnnotatedImage expects (x1, y1, x2, y2) pixel coordinates.
            face_coordinates = (area["x"], area["y"], area["x"] + area["w"], area["y"] + area["h"])
            face_confidence = "Face {}: {:.0%}".format(i, face_obj["confidence"])
            faceannotations.append([face_coordinates, face_confidence])
        run_time = str(round(time.time() - start_time, 2))
        results = gr.AnnotatedImage(
            label="Detected " + str(numberoffaces) + " faces via " + dbackend + " in " + run_time + " seconds.",
            value=(imgfile, faceannotations),
        )
        # gr.Interface expects one return value per output component, so fold the
        # face count and timing into the textbox string rather than returning three values.
        return results, "Detected " + str(numberoffaces) + " faces in " + run_time + " seconds."
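    # Backend picker; RetinaFace is selected on load.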
    dbackendchoice = gr.Radio(choices=dbackends, label='Detector Backend:', value='retinaface')
    gr.Interface(
        allow_flagging="never",
        fn=findFaces,
        inputs=[gr.Image(value="8428_26_SM.jpg"), dbackendchoice],
        outputs=[annotated_image, selected_face],
    )
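    # Clicking an annotation fires a SelectData event; evt.index is the position
    # of the clicked annotation in the list passed to AnnotatedImage.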
    def select_section(evt: gr.SelectData):
        return faceannotations[evt.index]

    annotated_image.select(select_section, None, selected_face)
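# show_error=True surfaces Python exceptions in the browser instead of failing silently.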
demo.launch(show_error=True)