# FaceRecogTUKL / app.py
# (Hugging Face Space page header, preserved as comments — the raw scrape
#  lines were not valid Python: "wasay's picture / Add requirements file /
#  9066861 / raw / history / blame / 3.19 kB")
import pathlib
import platform

import cv2
import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
from keras_vggface.vggface import VGGFace
from mtcnn.mtcnn import MTCNN
from PIL import Image
from scipy.spatial.distance import cosine
from tensorflow.keras.models import Model
plt = platform.system()
if plt == 'Linux':
pathlib.WindowsPath = pathlib.PosixPath
base_model = VGGFace(model="senet50")
# same as the following
x = base_model.layers[-2].output
model = Model(inputs=base_model.inputs, outputs=x, name="EmbeddingModel")
def img_to_encoding(image_path, model):
    """Load an image file and return its face-embedding vector.

    Parameters
    ----------
    image_path : str
        Path to an image on disk.
    model : keras Model
        Embedding model expecting a (1, 224, 224, 3) batch.

    Returns
    -------
    np.ndarray
        1-D embedding vector produced by ``model.predict``.

    Raises
    ------
    OSError
        If the file cannot be opened as an image (PIL raises; it never
        returns None, so the original ``if img is not None`` guard was dead
        and its failure path silently returned None).
    """
    img = np.array(Image.open(image_path))
    # Resize to the 224x224 input expected by the VGGFace/SENet-50 model.
    img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_AREA)
    # BUG FIX: the original called plt.imshow()/plt.show() here, but `plt`
    # had been rebound to a string at module level, so those calls raised
    # AttributeError. The debug display and debug print are removed.
    batch = np.expand_dims(img, axis=0)
    return model.predict(batch)[0, :]
# Reference gallery: person name -> image file holding one face sample.
_known_faces = {
    "dr adnan": "86055fdb-7441-422e-b501-ffac2221dae0.jpg",
    "wasay": "Wasay (1).jpg",
    "fatima": "IMG_20220826_095746.jpg",
    "omer": "Omer_1.jpg",
    "saad": "IMG20220825113812.jpg",
    "waleed": "IMG_20220825_113352.jpg",
    "talha": "IMG20220825113526.jpg",
    "asfand": "imgAsfand.jpg",
    "afrasiyab": "imgAfra.jpg",
}

# Precompute one embedding per known person (same calls, same insertion
# order as the original one-assignment-per-person version).
database = {
    person: img_to_encoding(path, model)
    for person, path in _known_faces.items()
}
def who_is_it(image):
    """Identify the person in `image` by nearest-cosine-distance lookup.

    Parameters
    ----------
    image : np.ndarray or None
        RGB image array from the gradio input; None when nothing was
        uploaded.

    Returns
    -------
    str
        A name from ``database`` if the best cosine distance is below the
        0.3 acceptance threshold, otherwise "Not in database".
    """
    # BUG FIX: the original computed `encoding` only when image was not
    # None but then used it unconditionally below, raising NameError on a
    # None input. Bail out early instead.
    if image is None:
        return "Not in database"

    # Step 1: compute the target encoding (resize to the model's 224x224
    # input and add a batch dimension).
    img = cv2.resize(image, (224, 224), interpolation=cv2.INTER_AREA)
    batch = np.expand_dims(img, axis=0)
    encoding = model.predict(batch)[0, :]

    # Step 2: scan the database for the closest encoding.
    min_dist = float("inf")
    identity = "Not in database"
    for name, db_enc in database.items():
        dist = cosine(db_enc, encoding)
        if dist < min_dist:
            min_dist = dist
            identity = name

    # Accept the match only below the empirical distance threshold.
    if min_dist < 0.3:
        return identity
    return "Not in database"
# Gradio wiring: one image input component, one label output component.
image = gr.inputs.Image(shape=(250, 250))
label = gr.outputs.Label()

# BUG FIX: the original called faceModel.detect_faces(image) at import time
# on the gradio *component* object — not a pixel array — which crashes
# before the app can launch, and then passed the cropped result (or the
# string "No face found") as `inputs=` instead of the input component.
# The detector is kept available for use inside the prediction path;
# the Interface is wired to the actual input component.
faceModel = MTCNN()

intf = gr.Interface(fn=who_is_it, inputs=image, outputs=label)
intf.launch(inline=False)