import pathlib
import platform

import cv2
import numpy as np
from PIL import Image
from scipy.spatial.distance import cosine
import gradio as gr
from tensorflow.keras.models import model_from_json
from mtcnn.mtcnn import MTCNN
# Load the embedding model: architecture from JSON, weights from HDF5.
with open('model.json', 'r') as json_file:
    loaded_model_json = json_file.read()
model = model_from_json(loaded_model_json)
model.load_weights('model.h5')
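# Note: model.json / model.h5 are assumed to define a face-embedding network
# taking 224x224 input; the script does not state the architecture, so the
# preprocessing below is taken to match whatever the model was trained on.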
# Models pickled on Windows reference WindowsPath; remap it on Linux.
if platform.system() == 'Linux':
    pathlib.WindowsPath = pathlib.PosixPath
def img_to_encoding(image_path, model):
    """Load an image from disk and return its embedding vector."""
    img = np.array(Image.open(image_path))
    img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_AREA)
    x = np.expand_dims(img, axis=0)
    embedding = model.predict(x)[0, :]
    return embedding
# Enrollment database: name -> embedding, built from photos bundled with the app.
database = {}
database["dr adnan"] = img_to_encoding("86055fdb-7441-422e-b501-ffac2221dae0.jpg", model)
database["wasay"] = img_to_encoding("Wasay (1).jpg", model)
database["fatima"] = img_to_encoding("IMG_20220826_095746.jpg", model)
database["omer"] = img_to_encoding("Omer_1.jpg", model)
database["saad"] = img_to_encoding("IMG20220825113812.jpg", model)
database["waleed"] = img_to_encoding("IMG_20220825_113352.jpg", model)
database["talha"] = img_to_encoding("IMG20220825113526.jpg", model)
database["asfand"] = img_to_encoding("imgAsfand.jpg", model)
database["afrasiyab"] = img_to_encoding("imgAfra.jpg", model)
def who_is_it(image):
    """Return (min_dist, identity) for a face crop matched against the database."""
    # Step 1: compute the target encoding for the image.
    img = cv2.resize(image, (224, 224), interpolation=cv2.INTER_AREA)
    x = np.expand_dims(img, axis=0)
    encoding = model.predict(x)[0, :]
    # Step 2: find the closest stored encoding.
    min_dist = float('inf')
    identity = "Not in database"
    for name, db_enc in database.items():
        # Cosine distance between the target encoding and the stored one.
        dist = cosine(db_enc, encoding)
        if dist < min_dist:
            min_dist = dist
            identity = name
    # Accept the match only below the 0.4 cosine-distance threshold.
    if min_dist < 0.4:
        return min_dist, identity
    return min_dist, "Not in database"
def remove(Id):
    """Delete an enrolled identity, guarding against unknown names."""
    if Id in database:
        del database[Id]
        return Id + " removed successfully"
    return Id + " not found in database"
def add_new(newImg, newId):
    """Detect a face in newImg, embed it, and enroll it under newId."""
    if newImg is not None and newId is not None:
        faces = MTCNN().detect_faces(newImg)
        newImg = newImg[:, :, ::-1]  # RGB -> BGR (detection above used the RGB frame)
        for face in faces:
            x, y, w, h = face["box"]
            img = newImg[y:y+h, x:x+w]
            img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_AREA)
            batch = np.expand_dims(img, axis=0)
            database[str(newId)] = model.predict(batch)[0, :]
            return str(newId) + " added successfully!"
    return "No face found - nothing was added"
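# MTCNN's detect_faces returns a list of dicts whose "box" entry is
# [x, y, width, height] in pixels, hence the y:y+h, x:x+w crop above.
# Note the RGB -> BGR flip is not applied in img_to_encoding, so enrollment
# photos and live crops go through slightly different preprocessing.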
# One shared MTCNN detector for the recognition tab.
faceModel = MTCNN()
def recog(image):
    """Detect all faces in the frame and greet the closest database match."""
    faces = faceModel.detect_faces(image)
    image = image[:, :, ::-1]  # RGB -> BGR before cropping, as in add_new
    min_dist = float('inf')
    min_identity = "Not in database"
    for face in faces:
        x, y, w, h = face["box"]
        img = image[y:y+h, x:x+w]
        dist, identity = who_is_it(img)
        if dist < min_dist:
            min_dist = dist
            min_identity = identity
    if min_dist == float('inf'):
        return "No face found"
    if min_identity != "Not in database":
        return "Welcome to the Lab " + min_identity
    return "Sorry, we could not recognize you"
# Each interface gets its own Label instance so components are not shared across tabs.
intf_del = gr.Interface(fn=remove, inputs=gr.Textbox(), outputs=gr.Label())
intf_recog = gr.Interface(fn=recog, inputs=gr.Image(type="numpy"), outputs=gr.Label())
intf_add = gr.Interface(fn=add_new, inputs=[gr.Image(type="numpy"), gr.Textbox()], outputs=gr.Label())
demo = gr.TabbedInterface([intf_recog, intf_add, intf_del], ["Recognize!", "Add New!", "Delete!"])
demo.launch(inline=False)