import gradio as gr
import numpy as np
import cv2
import os
from imutils import resize
import pickle
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC
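
# Face-recognition demo: detect faces with OpenCV's SSD face detector, embed them
# with the OpenFace model, and score them against per-celebrity linear SVMs that
# are trained on the fly from the images in ./celeb_dataset plus pre-computed
# "Unknown" embeddings. Results are served through a Gradio interface.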
def calc_embeddings(all_files, names):
    # Load the SSD face detector and the OpenFace embedding network.
    detector = cv2.dnn.readNetFromCaffe(
        "deploy.prototxt.txt", "res10_300x300_ssd_iter_140000.caffemodel"
    )
    embedder = cv2.dnn.readNetFromTorch("openface.nn4.small2.v1.t7")

    knownNames = []
    knownEmbeddings = []

    # Each file in all_files is paired with its label in names.
    for file, name in zip(all_files, names):
        path = os.path.join(os.getcwd(), "celeb_dataset", name, file)
        with open(path, "rb") as f:
            file_bytes = np.asarray(bytearray(f.read()), dtype=np.uint8)
        image = cv2.imdecode(file_bytes, 1)
        if image is None:
            # Skip files that are not readable images.
            continue
        image = resize(image, width=600)
        (h, w) = image.shape[:2]

        # Detect faces in the reference image.
        imageBlob = cv2.dnn.blobFromImage(
            cv2.resize(image, (300, 300)),
            1.0,
            (300, 300),
            (104.0, 177.0, 123.0),
            swapRB=False,
            crop=False,
        )
        detector.setInput(imageBlob)
        detections = detector.forward()

        if len(detections) > 0:
            # Keep only the most confident detection.
            i = np.argmax(detections[0, 0, :, 2])
            confidence = detections[0, 0, i, 2]
            if confidence > 0.5:
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")
                face = image[startY:endY, startX:endX]
                (fH, fW) = face.shape[:2]
                if fW < 20 or fH < 20:
                    continue
                # Compute the 128-d OpenFace embedding for the face crop.
                faceBlob = cv2.dnn.blobFromImage(
                    face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False
                )
                embedder.setInput(faceBlob)
                vec = embedder.forward()
                knownNames.append(name)
                knownEmbeddings.append(vec.flatten())

    # Append the pre-computed "Unknown" embeddings as the negative class.
    with open("unknownEmbeddings.pkl", "rb") as fp:
        unknown_embeddings = pickle.load(fp)
    with open("unknownNames.pkl", "rb") as fp:
        unknown_names = pickle.load(fp)
    for emb in unknown_embeddings:
        knownEmbeddings.append(emb)
    knownNames = knownNames + unknown_names

    return knownEmbeddings, knownNames


def recognize(embeddings, names):
    # Encode the string labels and fit a linear SVM on the face embeddings.
    le = LabelEncoder()
    labels = le.fit_transform(names)
    recognizer = SVC(C=1.0, kernel="linear", probability=True)
    recognizer.fit(embeddings, labels)
    return le, recognizer


def run_inference(myImage):
    # Gradio supplies RGB arrays, while the OpenCV models below expect BGR.
    myImage = cv2.cvtColor(myImage, cv2.COLOR_RGB2BGR)

    celebs = []
    scores = dict()

    for celeb in os.listdir("./celeb_dataset"):
        files = []
        names = []
        if celeb in celebs:
            continue
        name = celeb
        celebs.append(name)

        # Gather every reference image for this celebrity.
        for file in os.listdir(os.path.join(os.getcwd(), "celeb_dataset", celeb)):
            files.append(file)
            names.append(name)

        # Train a classifier for this celebrity against the "Unknown" class.
        embeddings, names = calc_embeddings(files, names)
        le, model = recognize(embeddings, names)

        detector = cv2.dnn.readNetFromCaffe(
            "deploy.prototxt.txt",
            "res10_300x300_ssd_iter_140000.caffemodel",
        )
        embedder = cv2.dnn.readNetFromTorch("openface.nn4.small2.v1.t7")

        # Detect faces in the input image.
        (h, w) = myImage.shape[:2]
        imageBlob = cv2.dnn.blobFromImage(
            cv2.resize(myImage, (300, 300)),
            1.0,
            (300, 300),
            (104.0, 177.0, 123.0),
            swapRB=False,
            crop=False,
        )
        detector.setInput(imageBlob)
        detections = detector.forward()

        for i in range(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            if confidence > 0.15:
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")
                face = myImage[startY:endY, startX:endX]
                (fH, fW) = face.shape[:2]
                if fW < 20 or fH < 20:
                    continue
                faceBlob = cv2.dnn.blobFromImage(
                    face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False
                )
                embedder.setInput(faceBlob)
                vec = embedder.forward()
                # Score the face and record the most likely label for this model.
                preds = model.predict_proba(vec)[0]
                j = np.argmax(preds)
                proba = preds[j]
                name = le.classes_[j]
                scores[name] = float(proba)

    # Keep only celebrity matches when at least one model recognised the face.
    if len(scores) > 1:
        scores.pop("Unknown", None)
    return scores


iface = gr.Interface(
    fn=run_inference,
    inputs="image",
    outputs="label",
    live=True,
    title="Who do you look Like?!",
)
iface.launch()
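
# Note: the script expects the following files next to it: deploy.prototxt.txt and
# res10_300x300_ssd_iter_140000.caffemodel (face detector), openface.nn4.small2.v1.t7
# (embedding model), unknownEmbeddings.pkl and unknownNames.pkl (pre-computed
# "Unknown" class), and a celeb_dataset/<celebrity name>/ folder of reference images.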