import gradio as gr
import numpy as np
from deepface import DeepFace
from pymongo.mongo_client import MongoClient
import cv2

# MongoDB connection used to store and retrieve face embeddings
credentials = "jamshaid:jamshaid19gh"
uri = f"mongodb+srv://{credentials}@cluster0.uimyui3.mongodb.net/?retryWrites=true&w=majority"
client = MongoClient(uri)
db = client["Face_identification"]
identities_collection = db["face_identities"]
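
# Each stored identity document has the form (illustrative values):
#   {"name": "Jane Doe", "embeddings": [0.12, -0.08, ...]}  # 128-d Facenet embedding vector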

# Embedding model and debug flag used throughout the app
model_name = "Facenet"
debug = False


def save_identity(image, name):
    """Generate a face embedding for the uploaded image and store it under the given name."""
    try:
        embeddings = DeepFace.represent(image, model_name=model_name, detector_backend="retinaface")
        embedding = embeddings[0]
        identity = {"embeddings": embedding["embedding"], "name": name}
        identities_collection.insert_one(identity)
        return f"{name} stored in database successfully. It is recommended to add 2 or 3 high-quality images per person."
    except Exception as error:
        return str(error)
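
# Illustrative usage sketch (not executed by the app); the image path below is hypothetical.
# save_identity also accepts the numpy array that Gradio passes in.
#
#   sample = cv2.imread("samples/jane_doe.jpg")
#   print(save_identity(sample, "Jane Doe"))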


def findCosineDistance(source_representation, test_representation):
    """Return the cosine distance (1 - cosine similarity) between two embeddings."""
    a = np.matmul(np.transpose(source_representation), test_representation)
    b = np.sum(np.multiply(source_representation, source_representation))
    c = np.sum(np.multiply(test_representation, test_representation))
    return 1 - (a / (np.sqrt(b) * np.sqrt(c)))
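
# Quick sanity check (illustrative, not executed by the app): identical vectors have
# distance 0, orthogonal vectors have distance 1.
#
#   findCosineDistance(np.array([1.0, 0.0]), np.array([1.0, 0.0]))  # -> 0.0
#   findCosineDistance(np.array([1.0, 0.0]), np.array([0.0, 1.0]))  # -> 1.0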


def findThreshold(model_name, distance_metric):
    """Look up the verification threshold for the given model and distance metric."""
    base_threshold = {"cosine": 0.40, "euclidean": 0.55, "euclidean_l2": 0.75}
    thresholds = {
        "VGG-Face": {"cosine": 0.40, "euclidean": 0.60, "euclidean_l2": 0.86},
        "Facenet": {"cosine": 0.40, "euclidean": 10, "euclidean_l2": 0.80},
        "Facenet512": {"cosine": 0.30, "euclidean": 23.56, "euclidean_l2": 1.04},
        "ArcFace": {"cosine": 0.68, "euclidean": 4.15, "euclidean_l2": 1.13},
        "Dlib": {"cosine": 0.07, "euclidean": 0.6, "euclidean_l2": 0.4},
        "SFace": {"cosine": 0.593, "euclidean": 10.734, "euclidean_l2": 1.055},
        "OpenFace": {"cosine": 0.10, "euclidean": 0.55, "euclidean_l2": 0.55},
        "DeepFace": {"cosine": 0.23, "euclidean": 64, "euclidean_l2": 0.64},
        "DeepID": {"cosine": 0.015, "euclidean": 45, "euclidean_l2": 0.17},
    }
    threshold = thresholds.get(model_name, base_threshold).get(distance_metric, 0.4)
    return threshold


threshold = findThreshold(model_name, "cosine")
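
# For reference (illustrative): with model_name = "Facenet" and the cosine metric, the
# lookup above resolves to 0.40; unknown models fall back to the base threshold.
#
#   findThreshold("Facenet", "cosine")         # -> 0.40
#   findThreshold("SomeOtherModel", "cosine")  # -> 0.40 (base threshold)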


def predict_image(image):
    """Detect faces in the image, match each against the stored embeddings, and return an annotated copy."""
    original_image = np.copy(image)
    if debug:
        print("Fetching stored identities from the database")

    # Get all stored face embeddings from the database
    results = identities_collection.find()
    faces = [dict(result) for result in results]
    if debug:
        print("Generating embeddings for faces detected in the input image")

    # Generate face embeddings for all detected faces in the image
    target_embedding_array = DeepFace.represent(
        img_path=image,
        model_name=model_name,
        detector_backend="retinaface",
    )

    identities = []
    # For each detected face, compare its embedding with every embedding in the database
    for target_embedding_obj in target_embedding_array:
        target_embedding = target_embedding_obj["embedding"]
        if debug:
            print("Comparing face embedding against stored identities")

        # The first stored identity within the distance threshold is taken as the match
        name = "Unknown"
        for face in faces:
            distance = findCosineDistance(face["embeddings"], target_embedding)
            if distance <= threshold:
                name = face["name"]
                break
        if debug:
            print(f"Matched identity: {name}")
        identities.append({"name": name, "facial_area": target_embedding_obj["facial_area"]})

    # Draw a bounding box and name label for each identified face
    output_img = np.copy(original_image)
    for identity in identities:
        x = identity["facial_area"]["x"]
        y = identity["facial_area"]["y"]
        w = identity["facial_area"]["w"]
        h = identity["facial_area"]["h"]
        cv2.rectangle(output_img, (x, y), (x + w, y + h), (0, 0, 255), 1)
        # Place the name label just below the bounding box
        text_position = (x, y + h + 5)
        cv2.putText(output_img, identity["name"], text_position, cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1)

    return output_img
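
# Illustrative usage sketch (not executed by the app); the file paths are hypothetical.
#
#   group_photo = cv2.imread("samples/group.jpg")
#   annotated = predict_image(group_photo)
#   cv2.imwrite("samples/group_annotated.jpg", annotated)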


# Gradio components for the "add new person" tab
image_input = gr.inputs.Image(shape=(160, 160))
label_input = gr.inputs.Textbox(label="Enter Name")
label_output = gr.outputs.Textbox()

# Interface for enrolling a new person
interface1 = gr.Interface(
    fn=save_identity,
    inputs=[image_input, label_input],
    outputs=label_output,
    title="Face Identification",
    description="Upload an image, enter the person's name, and store the identity in the database.",
)

# Gradio components and interface for recognising people in an uploaded image
image_input2 = gr.inputs.Image(shape=(160, 160))
output_image = gr.outputs.Image(type="numpy")

interface2 = gr.Interface(
    fn=predict_image,
    inputs=image_input2,
    outputs=output_image,
    title="Face Identification",
    description="Upload an image and get the identities of the people in it.",
)

# Combine the two interfaces into a tabbed app and launch it
gr.TabbedInterface(
    [interface2, interface1],
    tab_names=["Predict Persons", "Add new Person"],
).queue().launch()