import time

import gradio as gr
from deepface import DeepFace


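# Detector backends offered in the UI, as (display name, DeepFace detector_backend key) pairs.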
dbackends = [
  ['Haar Cascade (OpenCV)','opencv'], 
  ['Single Shot MultiBox Detector (OpenCV)','ssd'], 
  ['Histogram of Oriented Gradients (Dlib)','dlib'],
  ['RetinaFace','retinaface'],
  ['You Only Look Once v8','yolov8'],
  ['YuNet','yunet'],
#   ['Multi-task Cascade Convolutional Neural Network (TensorFlow) ','mtcnn'], 
  ['Fast Multi-task Cascade Convolutional Neural Network (PyTorch)','fastmtcnn']
]


with gr.Blocks() as demo:

	# Output components: the annotated image with face boxes, plus a textbox
	# that shows details for whichever face the user clicks.
	annotated_image = gr.AnnotatedImage()
	selected_face = gr.Textbox(label="Selected Face", value="Click on a face above")

	def findFaces(imgfile, dbackend):
		"""Detect faces in imgfile with the chosen DeepFace backend and return
		an AnnotatedImage plus a short status string for the textbox."""
		start_time = time.time()

		face_objs = DeepFace.extract_faces(img_path=imgfile, enforce_detection=False, detector_backend=dbackend)

		numberoffaces = len(face_objs)

		# Keep the annotations in a module-level list so the select handler
		# below can look up whichever face the user clicks.
		global faceannotations
		faceannotations = []
		for i, face_obj in enumerate(face_objs, 1):
			area = face_obj["facial_area"]
			# Bounding box as (x1, y1, x2, y2), the format gr.AnnotatedImage expects.
			face_coordinates = (area["x"], area["y"], area["x"] + area["w"], area["y"] + area["h"])
			face_confidence = "Face " + str(i) + ": " + "{:.0%}".format(face_obj["confidence"])
			faceannotations.append((face_coordinates, face_confidence))

		run_time = str(round(time.time() - start_time, 2))
		results = gr.AnnotatedImage(
			label="Detected " + str(numberoffaces) + " faces via " + dbackend + " in " + run_time + " seconds.",
			value=(imgfile, faceannotations),
		)

		# Two return values to match the two outputs wired up below.
		return results, "Detected " + str(numberoffaces) + " faces in " + run_time + " seconds."

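	# Radio button for picking the detector backend, plus an auto-generated
	# Interface that feeds the sample image and backend choice into findFaces.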
	dbackendchoice = gr.Radio(choices=dbackends,label='Detector Backend:',value='retinaface')
	gr.Interface(
		allow_flagging = "never",
		fn=findFaces,
		inputs=[gr.Image(value="8428_26_SM.jpg"), dbackendchoice],
		outputs=[annotated_image,selected_face],
	)
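	# When a face is clicked on the annotated image, show its entry from
	# faceannotations in the "Selected Face" textbox.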
	def select_section(evt: gr.SelectData):
		return faceannotations[evt.index]

	annotated_image.select(select_section, None, selected_face)

demo.launch(show_error=True)