# NOTE(review): removed non-Python extraction artifacts here (a file-size
# banner, git-blame commit hashes, and a line-number gutter) that would
# otherwise break the module at import time.
import gradio as gr
import numpy as np
import cv2
import os
from imutils import resize
import pickle
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC
def calc_embeddings(all_files, names):
    """Compute OpenFace embeddings for every dataset image with a usable face.

    Parameters
    ----------
    all_files : list[str]
        Image file names located under ``celeb_dataset/<name>/``.
    names : list[str]
        Label for each entry of ``all_files`` (parallel list, same length).

    Returns
    -------
    tuple[list, list]
        ``(embeddings, labels)`` — one flattened embedding vector per accepted
        face, extended with the pre-computed "unknown" embeddings/labels
        loaded from the local pickle files.
    """
    detector = cv2.dnn.readNetFromCaffe(
        "deploy.prototxt.txt", "res10_300x300_ssd_iter_140000.caffemodel"
    )
    embedder = cv2.dnn.readNetFromTorch("openface.nn4.small2.v1.t7")
    knownNames = []
    knownEmbeddings = []
    # BUG FIX: the original read `names[total]` but only advanced `total`
    # after a successful embedding, so any skipped image (no face, tiny face,
    # low confidence) shifted every later file onto the wrong name.  Walking
    # the two parallel lists in lockstep removes the counter entirely.
    for file, name in zip(all_files, names):
        path = os.path.join(os.getcwd(), "celeb_dataset", name, file)
        # Context manager guarantees the file handle is closed.
        with open(path, "rb") as f:
            file_bytes = np.asarray(bytearray(f.read()), dtype=np.uint8)
        image = cv2.imdecode(file_bytes, 1)
        image = resize(image, width=600)
        (h, w) = image.shape[:2]
        imageBlob = cv2.dnn.blobFromImage(
            cv2.resize(image, (300, 300)),
            1.0,
            (300, 300),
            (104.0, 177.0, 123.0),  # mean-subtraction values for the SSD detector
            swapRB=False,
            crop=False,
        )
        detector.setInput(imageBlob)
        detections = detector.forward()
        # detections has shape (1, 1, N, 7); len() of that array is always 1,
        # so test the actual detection count instead.
        if detections.shape[2] > 0:
            # Keep only the single most confident detection per image.
            i = np.argmax(detections[0, 0, :, 2])
            confidence = detections[0, 0, i, 2]
            if confidence > 0.5:
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")
                # Clamp to the image so a box hanging off the top/left edge
                # cannot produce an empty or inverted slice.
                startX, startY = max(0, startX), max(0, startY)
                face = image[startY:endY, startX:endX]
                (fH, fW) = face.shape[:2]
                if fW < 20 or fH < 20:
                    continue
                faceBlob = cv2.dnn.blobFromImage(
                    face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False
                )
                embedder.setInput(faceBlob)
                vec = embedder.forward()
                knownNames.append(name)
                knownEmbeddings.append(vec.flatten())
    # Merge in the pre-computed embeddings/labels for the "Unknown" class.
    # NOTE(review): pickle.load is only safe because these are trusted local
    # files shipped with the app — never load untrusted pickles.
    with open("unknownEmbeddings.pkl", "rb") as fp:
        unknown_embeddings = pickle.load(fp)
    with open("unknownNames.pkl", "rb") as fp:
        unknown_names = pickle.load(fp)
    knownEmbeddings.extend(unknown_embeddings)
    knownNames = knownNames + unknown_names
    return knownEmbeddings, knownNames
def recognize(embeddings, names):
    """Train a linear SVM face recognizer on the given embeddings.

    Parameters
    ----------
    embeddings : list
        One 128-d embedding vector per sample.
    names : list[str]
        String label for each embedding.

    Returns
    -------
    tuple
        ``(le, recognizer)`` — the fitted LabelEncoder and SVC, so callers
        can map ``argmax(predict_proba)`` back to a name via ``le.classes_``.
    """
    le = LabelEncoder()
    labels = le.fit_transform(names)
    recognizer = SVC(C=1.0, kernel="linear", probability=True)
    # BUG FIX: the original fit on the raw `names` and never used `labels`;
    # that only worked by accident because SVC sorts string classes the same
    # way LabelEncoder does.  Fit on the encoded labels so the
    # `le.classes_[j]` lookup in the caller is correct by construction.
    recognizer.fit(embeddings, labels)
    return le, recognizer
def run_inference(myImage):
    """Gradio handler: guess which celebrity each face in the image resembles.

    Parameters
    ----------
    myImage : np.ndarray
        Image array supplied by the gradio "image" input.

    Returns
    -------
    dict[str, float]
        Mapping of predicted celebrity name -> probability, one entry per
        detected face (the "Unknown" class is dropped when a real match exists).
    """
    # Collect every (file, label) pair from the dataset.  os.listdir never
    # repeats an entry, so the original `if celeb in celebs` guard was dead
    # code and has been removed.
    # NOTE(review): this retrains the SVM on every request; if latency
    # matters, hoist the training out of the handler and cache the model.
    files = []
    names = []
    for celeb in os.listdir("./celeb_dataset"):
        for file in os.listdir(os.path.join(os.getcwd(), "celeb_dataset", celeb)):
            files.append(file)
            names.append(celeb)
    embeddings, names = calc_embeddings(files, names)
    le, model = recognize(embeddings, names)
    detector = cv2.dnn.readNetFromCaffe(
        "deploy.prototxt.txt",
        "res10_300x300_ssd_iter_140000.caffemodel",
    )
    embedder = cv2.dnn.readNetFromTorch("openface.nn4.small2.v1.t7")
    (h, w) = myImage.shape[:2]
    imageBlob = cv2.dnn.blobFromImage(
        cv2.resize(myImage, (300, 300)),
        1.0,
        (300, 300),
        (104.0, 177.0, 123.0),  # mean-subtraction values for the SSD detector
        swapRB=False,
        crop=False,
    )
    detector.setInput(imageBlob)
    detections = detector.forward()
    scores = dict()
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        # Deliberately low threshold: favour recall for a fun demo.
        if confidence > 0.15:
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            # Clamp so boxes hanging off the top/left edge cannot yield
            # empty or inverted slices.
            startX, startY = max(0, startX), max(0, startY)
            face = myImage[startY:endY, startX:endX]
            (fH, fW) = face.shape[:2]
            if fW < 20 or fH < 20:
                continue
            faceBlob = cv2.dnn.blobFromImage(
                face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False
            )
            embedder.setInput(faceBlob)
            vec = embedder.forward()
            preds = model.predict_proba(vec)[0]
            j = np.argmax(preds)
            # float() keeps gradio's label output on plain Python floats.
            scores[le.classes_[j]] = float(preds[j])
    # BUG FIX: the original `del scores["Unknown"]` raised KeyError whenever
    # two or more real celebrities matched but "Unknown" was not among the
    # scores.  pop() with a default is a safe no-op in that case.
    if len(scores) > 1:
        scores.pop("Unknown", None)
    return scores
# Wire the recognizer into a gradio UI: a live image input feeds
# run_inference, and its {name: probability} result is rendered as a label.
iface = gr.Interface(
    fn=run_inference,
    title="Who do you look Like?!",
    inputs="image",
    outputs="label",
    live=True,
)

# Start the local gradio server.
iface.launch()