import math

import cv2
import gradio as gr
import numpy as np
import torch
import torchvision.transforms as transforms
from deepface import DeepFace
from PIL import Image, ImageColor

from utils.facial_makeup import *

# Adjust for larger annotation font size / line thickness in all images.
FONT_SCALE = 8e-4
THICKNESS_SCALE = 4e-4

title = "DEEP FACE DEMO"
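
# Options exposed in the UI; each list is passed straight to DeepFace as the
# distance metric, detector backend, and recognition model respectively.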
distance_metric = ["cosine", "euclidean", "euclidean_l2",]
detection_model = ["opencv", "retinaface", "mtcnn", "ssd", "dlib",]
recognition_model = ["VGG-Face", "Facenet", "OpenFace", "DeepFace", "DeepID", "ArcFace", "Dlib", "SFace",]
facial_recognition_example = [['./images/blackpink.jpg', './images/jennie.jpg'],
                              ['./images/blackpink.jpg', './images/lisa.jpg'],
                              ['./images/blackpink.jpg', './images/jisoo.jpg'],
                              ['./images/blackpink.jpg', './images/rose.jpg']]
facial_analysis_example = [['./images/noone01.jpg'], ['./images/noone02.jpg'], ['./images/midu.jpg']]
facial_makeup_example = [['./images/noone01.jpg'], ['./images/noone02.jpg'], ['./images/midu.jpg']]
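
# Face-parsing class indices for the regions we recolor. The exact values are
# assumed to match the label map of the segmentation model behind
# utils.facial_makeup (a BiSeNet-style face-parsing network).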
table = {
    'hair': 17,
    'upper_lip': 12,
    'lower_lip': 13,
}

def facial_recognition(img1, img2, metric, detection, recognition):
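    """Verify whether the most similar faces in two input images belong to the same person.

    Relies on DeepFace.verify, whose result dict provides "verified", "distance",
    and "facial_areas" (the matched face box in each image); the boxes are drawn
    on the images and the verdict is returned as text.
    """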
    output = "No face was detected in one of the two photos."
    # Resize both images to a common height so they can be concatenated side by side.
    min_height = min(img1.shape[0], img2.shape[0])
    try:
        result = DeepFace.verify(img1_path=img1, img2_path=img2, detector_backend=detection,
                                 model_name=recognition, distance_metric=metric)
    except Exception:
        img1 = cv2.resize(img1, (img1.shape[1], min_height), interpolation=cv2.INTER_AREA)
        img2 = cv2.resize(img2, (img2.shape[1], min_height), interpolation=cv2.INTER_AREA)
        output_img = np.concatenate((img1, img2), axis=1)
        return Image.fromarray(output_img), output
    x1, y1, w1, h1 = (result["facial_areas"]["img1"]["x"], result["facial_areas"]["img1"]["y"],
                      result["facial_areas"]["img1"]["w"], result["facial_areas"]["img1"]["h"])
    cv2.rectangle(img1, (x1, y1), (x1 + w1, y1 + h1), (255, 0, 0), 4)
    img1 = cv2.resize(img1, (img1.shape[1], min_height), interpolation=cv2.INTER_AREA)
    x2, y2, w2, h2 = (result["facial_areas"]["img2"]["x"], result["facial_areas"]["img2"]["y"],
                      result["facial_areas"]["img2"]["w"], result["facial_areas"]["img2"]["h"])
    cv2.rectangle(img2, (x2, y2), (x2 + w2, y2 + h2), (255, 0, 0), 4)
    img2 = cv2.resize(img2, (img2.shape[1], min_height), interpolation=cv2.INTER_AREA)
    output_img = np.concatenate((img1, img2), axis=1)
    distance = result["distance"]
    if result["verified"]:
        output = f"The two faces belong to the same person ({metric} distance: {distance:.2f})."
    else:
        output = "The two faces do not belong to the same person."
    return Image.fromarray(output_img), output

def facial_analysis(img, detection):
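    """Detect faces with DeepFace.analyze and annotate each with age, gender, race, and emotion."""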
    height, width, _ = img.shape
    # Scale the annotation font and line thickness with the image resolution.
    font_scale = min(width, height) * FONT_SCALE
    thickness = math.ceil(min(width, height) * THICKNESS_SCALE)
    try:
        objs = DeepFace.analyze(img_path=img, actions=['age', 'gender', 'race', 'emotion'],
                                detector_backend=detection)
    except Exception:
        # No face was detected; return the input image unchanged.
        return Image.fromarray(img)
    for obj in objs:
        x, y, w, h = obj["region"]["x"], obj["region"]["y"], obj["region"]["w"], obj["region"]["h"]
        age = obj["age"]
        gender = obj["dominant_gender"]
        race = obj["dominant_race"]
        emotion = obj["dominant_emotion"]
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 4)
        text = f"{age},{gender},{race},{emotion}"
        cv2.putText(
            img,
            text,
            (int(x), int(y) - 10),
            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
            fontScale=font_scale,
            color=(255, 0, 0),
            thickness=thickness,
        )
    return Image.fromarray(img)

def facial_makeup(img_path, hair_color, lips_color):
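    """Recolor the hair and lips of the face in img_path.

    evaluate() and hair() come from utils.facial_makeup; evaluate() is assumed to
    return a per-pixel face-parsing map and hair() to recolor a single parsed region.
    """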
    hair_rgb = ImageColor.getcolor(hair_color, "RGB")
    lips_rgb = ImageColor.getcolor(lips_color, "RGB")
    image = cv2.imread(img_path)
    parsing = evaluate(img_path)
    parsing = cv2.resize(parsing, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_NEAREST)
    parts = [table['hair'], table['upper_lip'], table['lower_lip']]
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    colors = [list(hair_rgb), list(lips_rgb), list(lips_rgb)]
    for part, color in zip(parts, colors):
        image = hair(image, parsing, part, color)
    return Image.fromarray(image)

def main():
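    """Build the Gradio Blocks UI: shared DeepFace settings plus one tab per task."""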
    demo = gr.Blocks()
    with demo:
        gr.Markdown(title)
        # Shared settings used by every tab.
        inputs_metric = gr.Radio(choices=distance_metric, label='Distance Metric', value="cosine", show_label=True)
        inputs_detection = gr.Dropdown(choices=detection_model, label='Detection Model', value="retinaface", show_label=True)
        inputs_recognition = gr.Dropdown(choices=recognition_model, label='Recognition Model', value="ArcFace", show_label=True)
        with gr.Tabs():
            with gr.TabItem('Facial Recognition'):
                with gr.Row():
                    gr.Markdown("Input two images; the most similar pair of faces across the two images will be compared.")
                with gr.Row():
                    with gr.Column():
                        Recognition_inputs_image1 = gr.Image(label='Image 1', interactive=True)
                        Recognition_inputs_image2 = gr.Image(label='Image 2', interactive=True)
                    with gr.Column():
                        Recognition_outputs_image = gr.Image(type="pil", label="Output Image")
                        # Recognition_outputs_text = gr.Textbox(label="Output")
                        Recognition_outputs_text = gr.Label(label='Output')
                with gr.Row():
                    Recognition_example_images = gr.Examples(examples=facial_recognition_example, inputs=[Recognition_inputs_image1, Recognition_inputs_image2])
                verify_but = gr.Button('Verify')
            with gr.TabItem('Facial Analysis'):
                with gr.Row():
                    gr.Markdown("Input an image; age, gender, race, and emotion are returned for every detected face.")
                with gr.Row():
                    with gr.Column():
                        Analysis_inputs_image = gr.Image(label='Image', interactive=True)
                    with gr.Column():
                        Analysis_outputs_image = gr.Image(type="pil", label="Output Image")
                with gr.Row():
                    Analysis_example_images = gr.Examples(examples=facial_analysis_example, inputs=[Analysis_inputs_image])
                analysis_but = gr.Button("Analyze")
            with gr.TabItem('Facial MakeUp'):
                with gr.Row():
                    gr.Markdown("Input an image and choose hair and lip colors; the image is returned with the selected makeup applied.")
                with gr.Row():
                    with gr.Column():
                        MakeUp_inputs_image = gr.Image(label='Image', type='filepath', interactive=True)
                        MakeUp_inputs_hair = gr.ColorPicker(label="Hair Color")
                        MakeUp_inputs_lips = gr.ColorPicker(label="Lips Color")
                    with gr.Column():
                        MakeUp_outputs_image = gr.Image(type="pil", label="Output Image")
                with gr.Row():
                    MakeUp_example_images = gr.Examples(examples=facial_makeup_example, inputs=[MakeUp_inputs_image])
                makeup_but = gr.Button("MakeUp")
        # Wire each button to its handler.
        verify_but.click(facial_recognition,
                         inputs=[Recognition_inputs_image1, Recognition_inputs_image2, inputs_metric, inputs_detection, inputs_recognition],
                         outputs=[Recognition_outputs_image, Recognition_outputs_text], queue=True)
        analysis_but.click(facial_analysis, inputs=[Analysis_inputs_image, inputs_detection], outputs=[Analysis_outputs_image], queue=True)
        makeup_but.click(facial_makeup, inputs=[MakeUp_inputs_image, MakeUp_inputs_hair, MakeUp_inputs_lips], outputs=[MakeUp_outputs_image], queue=True)
    demo.launch(debug=True, enable_queue=True, server_name="0.0.0.0")

if __name__ == "__main__":
main()