# compare_faces/app.py
import torch
from PIL import Image
from models.mtcnn import MTCNN
from models.inception_resnet_v1 import InceptionResnetV1
from typing import Tuple
import gradio as gr
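
# Assumption: the local models package vendors the MTCNN and InceptionResnetV1
# implementations from the facenet-pytorch project, which these module paths match.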
# Run on the GPU when available, otherwise fall back to the CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# MTCNN detects and aligns faces, cropping each one to 160x160 pixels.
mtcnn = MTCNN(
    image_size=160, margin=0, min_face_size=20,
    thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True,
    device=device
)

# Inception Resnet V1 pretrained on VGGFace2 maps an aligned face to an embedding.
resnet = InceptionResnetV1(pretrained='vggface2').eval().to(device)

def compare(img_1, img_2) -> Tuple[str, float]:
    # Detect and align one face per image; MTCNN returns None when no face is found.
    img_1_aligned = mtcnn(img_1)
    img_2_aligned = mtcnn(img_2)
    if img_1_aligned is None or img_2_aligned is None:
        return "No face detected in at least one of the images.", -1.0
    # Embed both faces and measure the Euclidean distance between the embeddings.
    with torch.no_grad():
        emb_1 = resnet(img_1_aligned.unsqueeze(dim=0).to(device))
        emb_2 = resnet(img_2_aligned.unsqueeze(dim=0).to(device))
    result = round((emb_1 - emb_2).norm().item(), 3)
    if result < 0.8:
        return "The same person.", result
    else:
        return "Two different persons.", result
title = "Compare Faces"
description = "A FaceNet-style face comparison model (MTCNN for face detection and alignment, Inception Resnet V1 trained on the VGGFace2 dataset for feature extraction) that decides whether two images show the same person."
article = "Created by HienK64BKHN."
demo = gr.Interface(fn=compare,  # maps the two input images to the outputs below
                    inputs=[gr.Image(type="pil", label="The first person"),
                            gr.Image(type="pil", label="The second person")],
                    outputs=[gr.Textbox(label="Result"),
                             gr.Number(label="Embedding distance (the smaller the number, the more similar the two faces)")],
                    title=title,
                    description=description,
                    article=article)
demo.launch(debug=False)
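
# Possible extensions (not implemented here): gr.Interface accepts an `examples`
# argument, which could serve image pairs from an "examples/" directory, and
# demo.launch(share=True) would create a public link when running locally.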