facefinder / app.py
import gradio as gr
from deepface import DeepFace
import time
# Face detector backends supported by DeepFace.extract_faces.
dbackends = [
    'opencv',
    'ssd',
    'dlib',
    'mtcnn',
    'retinaface',
    'mediapipe',
    'yolov8',
    'yunet',
    'fastmtcnn',
]
# Output components: the image with face boxes drawn on it, plus the raw
# annotation data derived from DeepFace.extract_faces.
annotated_image = gr.AnnotatedImage()
jsontext = gr.Text(label="deepface extract_faces results")
def findFaces(imgfile, dbackend):
    start_time = time.time()
    # Run face detection with the selected backend; enforce_detection=False
    # keeps DeepFace from raising an error when no face is found.
    face_objs = DeepFace.extract_faces(
        img_path=imgfile,
        enforce_detection=False,
        detector_backend=dbackend,
    )
    numberoffaces = len(face_objs)
    faceannotations = []
    for face_obj in face_objs:
        # Convert the facial_area dict (x, y, w, h) into the (x1, y1, x2, y2)
        # bounding box that gr.AnnotatedImage expects, labelled with the
        # detection confidence as a percentage.
        area = face_obj["facial_area"]
        face_coordinates = (
            area["x"],
            area["y"],
            area["x"] + area["w"],
            area["y"] + area["h"],
        )
        face_confidence = "{:.0%}".format(face_obj["confidence"])
        faceannotations.append([face_coordinates, face_confidence])
    run_time = str(round(time.time() - start_time, 2))
    results = gr.AnnotatedImage(
        label="Detected " + str(numberoffaces) + " faces via " + dbackend + " in " + run_time + " seconds.",
        value=(imgfile, faceannotations),
    )
    # Return one value per output component; the face count and run time are
    # already shown in the AnnotatedImage label.
    return results, faceannotations
dbackendchoice = gr.Radio(choices=dbackends, label="Detector Backend:")
demo = gr.Interface(
    allow_flagging="never",
    fn=findFaces,
    inputs=[gr.Image(value="8428_26_SM.jpg"), dbackendchoice],
    outputs=[annotated_image, jsontext],
)
demo.launch(show_error=True)
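
# --- Usage sketch (illustrative only; not executed by the app) ---------------
# DeepFace.extract_faces returns a list of dicts, roughly of the form
#   {"face": <face crop as a numpy array>,
#    "facial_area": {"x": 34, "y": 51, "w": 86, "h": 109, ...},
#    "confidence": 0.98}
# and findFaces turns each entry into an ((x1, y1, x2, y2), "98%") pair for
# gr.AnnotatedImage. Assuming the sample image above exists on disk, the
# function could also be exercised without the UI, for example:
#   annotated, annotations = findFaces("8428_26_SM.jpg", "retinaface")
#   print(annotations)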