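"""Gradio demo built on DeepFace: face verification, facial attribute
analysis (age, gender, race, emotion), and simple virtual makeup that
recolors hair and lips via a face-parsing model."""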
from deepface import DeepFace
import gradio as gr

from PIL import Image, ImageColor
import cv2
import numpy as np
import math

import torch
import torchvision.transforms as transforms
from utils.facial_makeup import evaluate, hair  # face-parsing and recoloring helpers

FONT_SCALE = 8e-4  # Label font size, scaled by the shorter image dimension
THICKNESS_SCALE = 4e-4  # Label line thickness, scaled by the shorter image dimension

title = "DEEP FACE DEMO"
distance_metric = ["cosine", "euclidean", "euclidean_l2",]
detection_model = ["opencv", "retinaface", "mtcnn", "ssd", "dlib",]
recognition_model = ["VGG-Face", "Facenet", "OpenFace", "DeepFace", "DeepID", "ArcFace", "Dlib", "SFace",]
facial_recognition_example=[['./images/blackpink.jpg', './images/jennie.jpg'], ['./images/blackpink.jpg', './images/lisa.jpg'],\
                            ['./images/blackpink.jpg', './images/jisoo.jpg'], ['./images/blackpink.jpg', './images/rose.jpg']]
facial_analysis_example=[['./images/noone01.jpg'], ['./images/noone02.jpg'], ['./images/midu.jpg']]
facial_makeup_example=[['./images/noone01.jpg'], ['./images/noone02.jpg'], ['./images/midu.jpg']]
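# Face-parsing label indices consumed by facial_makeup below; these are assumed
# to match the label map of the parsing model behind utils.facial_makeup.evaluate.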
table = {
    'hair': 17,
    'upper_lip': 12,
    'lower_lip': 13
}

def facial_recognition(img1, img2, metric, detection, recognition):
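	"""Compare the most similar faces in img1 and img2. Returns the two images
	side by side with the matched faces boxed, plus a verdict string."""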
	output = "One of the two photos does not have face."
	min_height = min(img1.shape[0],img2.shape[1])
	try:
		result = DeepFace.verify(img1_path = img1, img2_path = img2, detector_backend = detection, model_name = recognition, distance_metric=metric)
	except:
		img1 = cv2.resize(img1, (img1.shape[1], min_height), interpolation = cv2.INTER_AREA)
		img2 = cv2.resize(img2, (img2.shape[1], min_height), interpolation = cv2.INTER_AREA)
		output_img = np.concatenate((img1, img2), axis=1)
		return Image.fromarray(output_img), output 

	# verify() reports the matched face regions; box them, then resize both
	# images to a common height so they can be concatenated side by side.
	area1 = result["facial_areas"]["img1"]
	x1, y1, w1, h1 = area1["x"], area1["y"], area1["w"], area1["h"]
	cv2.rectangle(img1, (x1, y1), (x1 + w1, y1 + h1), (255, 0, 0), 4)
	img1 = cv2.resize(img1, (img1.shape[1], min_height), interpolation=cv2.INTER_AREA)
	area2 = result["facial_areas"]["img2"]
	x2, y2, w2, h2 = area2["x"], area2["y"], area2["w"], area2["h"]
	cv2.rectangle(img2, (x2, y2), (x2 + w2, y2 + h2), (255, 0, 0), 4)
	img2 = cv2.resize(img2, (img2.shape[1], min_height), interpolation=cv2.INTER_AREA)
	output_img = np.concatenate((img1, img2), axis=1)
	distance = result["distance"]
	if result["verified"]:
		output = f"The two faces belong to the same person ({metric} distance: {distance:.2f})."
	else:
		output = "The two faces do not belong to the same person."

	return Image.fromarray(output_img), output 


def facial_analysis(img, detection):
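	"""Annotate every detected face in img with its predicted age, gender,
	race, and emotion."""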

	height, width, _ = img.shape
	font_scale = min(width, height) * FONT_SCALE
	thickness = math.ceil(min(width, height) * THICKNESS_SCALE)
	try:
		objs = DeepFace.analyze(img_path=img, actions=['age', 'gender', 'race', 'emotion'], detector_backend=detection)
	except Exception:
		# No detectable face: return the input image unchanged.
		return Image.fromarray(img)
	for obj in objs:
		x, y, w, h = obj["region"]["x"], obj["region"]["y"], obj["region"]["w"], obj["region"]["h"]
		age = obj["age"]
		gender = obj["dominant_gender"]
		race = obj["dominant_race"]
		emotion = obj["dominant_emotion"]
		cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 4)
		text = f"{age},{gender},{race},{emotion}"

		cv2.putText(
			img,
			text,
			(int(x), int(y) - 10),
			fontFace = cv2.FONT_HERSHEY_SIMPLEX,
			fontScale = font_scale,
			color = (255, 0, 0),
			thickness=thickness
		)

	return Image.fromarray(img)


def facial_makeup(img_path, hair_color, lips_color):
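	"""Recolor the hair and lips of the image at img_path. evaluate() (from
	utils.facial_makeup) yields a per-pixel label map; hair() recolors the
	pixels carrying one label. Label indices come from `table` above."""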
	hair_rgb = ImageColor.getcolor(hair_color, "RGB")
	lips_rgb = ImageColor.getcolor(lips_color, "RGB")
	image = cv2.imread(img_path)
	parsing = evaluate(img_path)
	# Nearest-neighbor keeps the parsing map's integer labels intact when resizing.
	parsing = cv2.resize(parsing, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_NEAREST)
	parts = [table['hair'], table['upper_lip'], table['lower_lip']]

	image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
	colors = [list(hair_rgb), list(lips_rgb), list(lips_rgb)]
	for part, color in zip(parts, colors):
		image = hair(image, parsing, part, color)
	return Image.fromarray(image)
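
# Example standalone call (hex strings are what gr.ColorPicker produces; the
# path is one of the bundled example images):
#   facial_makeup('./images/midu.jpg', '#8B4513', '#FF0000').save('out.jpg')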

def main():
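	"""Build the three-tab Gradio UI and wire each button to its handler."""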
	demo = gr.Blocks()
	with demo:
		gr.Markdown(title)
		inputs_metric = gr.Radio(choices=distance_metric, label='Distance Metric', value="cosine", show_label=True)
		inputs_detection = gr.Dropdown(choices=detection_model, label='Detection Model', value="retinaface", show_label=True)
		inputs_recognition = gr.Dropdown(choices=recognition_model, label='Recognition Model', value="ArcFace", show_label=True)
		with gr.Tabs():
			with gr.TabItem('Facial Recognition'):
				with gr.Row():
					gr.Markdown("Input two images, the most similar faces between two images will be compared")
				with gr.Row():
					with gr.Column():
						Recognition_inputs_image1 = gr.Image(label='Image 1',interactive=True)
						Recognition_inputs_image2 = gr.Image(label='Image 2',interactive=True)
					with gr.Column():
						Recognition_outputs_image = gr.Image(type="pil", label="Output Image")
						Recognition_outputs_text = gr.Label(label='Output')
				with gr.Row():
					Recognition_example_images = gr.Examples(examples=facial_recognition_example,inputs=[Recognition_inputs_image1,Recognition_inputs_image2])

				verify_but = gr.Button('Verify') 
			with gr.TabItem('Facial Analysis'):
				with gr.Row():
					gr.Markdown("Input image, return results including age, gender, race and emotion of all faces.")
				with gr.Row():
					with gr.Column():
						Analysis_inputs_image = gr.Image(label='Image',interactive=True)
					with gr.Column():
						Analysis_outputs_image = gr.Image(type="pil", label="Output Image")
				with gr.Row():
					Analysis_example_images = gr.Examples(examples=facial_analysis_example,inputs=[Analysis_inputs_image])
				analysis_but = gr.Button("Analyze")

			with gr.TabItem('Facial MakeUp'):
				with gr.Row():
					gr.Markdown("Input image, choose hair and lips color, return image with selected makeup.")
				with gr.Row():
					with gr.Column():
						MakeUp_inputs_image = gr.Image(label='Image',type='filepath',interactive=True)
						MakeUp_inputs_hair = gr.ColorPicker(label="Hair Color")
						MakeUp_inputs_lips = gr.ColorPicker(label="Lips Color")
					with gr.Column():
						MakeUp_outputs_image = gr.Image(type="pil", label="Output Image")
				with gr.Row():
					MakeUp_example_images = gr.Examples(examples=facial_makeup_example,inputs=[MakeUp_inputs_image])
				makeup_but = gr.Button("MakeUp")
		verify_but.click(facial_recognition, inputs=[Recognition_inputs_image1, Recognition_inputs_image2, inputs_metric, inputs_detection, inputs_recognition],
						outputs=[Recognition_outputs_image, Recognition_outputs_text], queue=True)
		analysis_but.click(facial_analysis, inputs=[Analysis_inputs_image, inputs_detection], outputs=[Analysis_outputs_image], queue=True)
		makeup_but.click(facial_makeup, inputs=[MakeUp_inputs_image, MakeUp_inputs_hair, MakeUp_inputs_lips], outputs=[MakeUp_outputs_image], queue=True)

	# Note: enable_queue moved from launch() to Blocks.queue() in later Gradio releases.
	demo.launch(debug=True, enable_queue=True, server_name="0.0.0.0")



if __name__ == "__main__":
	main()