Spaces: jirufengyu (Runtime error)
Commit f5f3b58 • 1 Parent(s): 482c2c9
jirufengyu committed
init app
Browse files
- gradiodpl.py +118 -0
- images/img_emb/asdf/asdf_0.npy +0 -0
- images/ori_images/asdf/asdf_0.jpg +0 -0
- local.py +4 -0
- style.css +10 -0
- utils/__pycache__/face_rec.cpython-310.pyc +0 -0
- utils/face_rec.py +103 -0
- video.py +115 -0
- videofast.py +160 -0
gradiodpl.py
ADDED
@@ -0,0 +1,118 @@
import gradio as gr
from PIL import Image
import os
from utils.face_rec import input_an_image, update_ind2person, image_rec
import cv2
import numpy as np

# Maps an embedding index to {'person': name, 'emb': encoding}; filled at runtime.
ind2person = dict()

def video_identity(video):
    return video

def str_intercept(img_path):
    """Return the indices of the last '.' and last '/' in img_path."""
    img_path_ = img_path[::-1]
    point_index = 0
    slash_index = 0

    flag_pi = 0
    flag_si = 0

    for i in range(len(img_path_)):
        if img_path_[i] == "." and flag_pi == 0:
            point_index = i
            flag_pi = 1
        if img_path_[i] == "/" and flag_si == 0:
            slash_index = i
            flag_si = 1

    point_index = len(img_path) - 1 - point_index
    slash_index = len(img_path) - 1 - slash_index

    return point_index, slash_index

def face_entry(img_path, name_text):
    """Encode the captured face image and register it under name_text."""
    if img_path == "" or name_text == "" or img_path is None or name_text is None:
        return None  # the click handler has a single output, so return a single value
    img_ = Image.open(img_path)
    emb = input_an_image(img_, name_text)
    update_ind2person(ind2person, emb, name_text)
    return 'upload ' + name_text + ' image done!'

def face_rec_img(image):
    """Run recognition on a PIL image and return it with boxes and names drawn."""
    known_face_encodings = [v['emb'] for k, v in ind2person.items()]
    image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    image = image_rec(image, known_face_encodings=known_face_encodings, _ind2person=ind2person)
    image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    return image

def change_input_image(choice):
    if choice == "camera":
        return gr.Image(image_mode="RGB", source="webcam", type="filepath", label="upload face image", visible=True)
    elif choice == "upload":
        return gr.Image(image_mode="RGB", source="upload", type="filepath", label="upload face image", visible=True)
    else:
        return gr.Image.update(visible=False)

def main():
    with gr.Blocks(css='style.css') as demo:
        with gr.Row():
            gr.Markdown("Capture Face", elem_id="md1")
        with gr.Row():
            with gr.Column(scale=2):
                with gr.Row():
                    input_img = gr.Image(image_mode="RGB", source="webcam", type="filepath", label="capture face image")
            with gr.Column(scale=1):
                with gr.Row():
                    input_name = gr.Textbox(label="input person name")
                with gr.Row():
                    btn = gr.Button(value="upload face image")
                with gr.Row():
                    output_name = gr.Textbox(label="echo")

        with gr.Row():
            gr.Markdown("Face Recognition", elem_id="md1")
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    img2rec = gr.Image(image_mode="RGB", source="webcam", type="pil", label="upload face image")
                with gr.Row():
                    btn_img_rec = gr.Button(value="upload face image")
            with gr.Column():
                with gr.Row():
                    output_rec = gr.Image(image_mode="RGB", source="upload", type="pil", label="rec image")

        btn.click(fn=face_entry, inputs=[input_img, input_name], outputs=[output_name])
        btn_img_rec.click(fn=face_rec_img, inputs=[img2rec], outputs=[output_rec])

    return demo

if __name__ == '__main__':
    demo = main()
    demo.launch(share=True)
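Since both button callbacks are plain functions over the module-level ind2person dict, they can be smoke-tested without launching the UI. A minimal sketch, run from the repo root with the same dependencies installed; "alice.jpg" is a hypothetical single-face photo, not part of this commit:

from PIL import Image
from gradiodpl import face_entry, face_rec_img

# Register a face (alice.jpg is a stand-in for any photo with one face).
print(face_entry("alice.jpg", "alice"))        # -> "upload alice image done!"

# Recognize it again; the result has a colored box and name drawn on it.
face_rec_img(Image.open("alice.jpg")).save("alice_rec.jpg")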
images/img_emb/asdf/asdf_0.npy
ADDED
Binary file (1.15 kB)
images/ori_images/asdf/asdf_0.jpg
ADDED
local.py
ADDED
@@ -0,0 +1,4 @@
import tkinter
import cv2
import os
# Absolute import so this file can be run as a top-level script
# (a relative import like "from .utils.face_rec import ..." would fail here).
from utils.face_rec import input_an_image, update_ind2person
style.css
ADDED
@@ -0,0 +1,10 @@
h1 {
  text-align: center;
}

#content_align {
  text-align: center;
}

/* matches elem_id="md1" in gradiodpl.py */
#md1 {
  font-size: 15px;
}
utils/__pycache__/face_rec.cpython-310.pyc
ADDED
Binary file (2.82 kB)
utils/face_rec.py
ADDED
@@ -0,0 +1,103 @@
import face_recognition
import os
import numpy as np
from PIL import Image
import random
import cv2

def update_ind2person(ind2person, emb, person):
    """Append a new (person, embedding) entry at the next free index."""
    ind2person[len(ind2person)] = dict(person=person, emb=emb)
    print(f"dict ind2person update: {person}!!!")
    return ind2person

def input_an_image(image, person_name, ori_img_dir='images/ori_images',
                   img_emb_dir='images/img_emb', save_ori_img=True):
    """
    args:
        image: PIL Image
        person_name: str
    Saves the photo under ori_img_dir/person_name and its face encoding
    under img_emb_dir/person_name, then returns the encoding.
    """
    image_file_dir = os.path.join(ori_img_dir, person_name)
    emb_file_dir = os.path.join(img_emb_dir, person_name)
    if not os.path.exists(image_file_dir):
        os.makedirs(image_file_dir)
        os.makedirs(emb_file_dir)
        file_ind = 0
    else:
        file_ind = len(os.listdir(image_file_dir))
    if save_ori_img:
        image.save(os.path.join(image_file_dir, person_name + f'_{file_ind}.jpg'))
    file_ = np.array(image)
    emb = face_recognition.face_encodings(file_)[0]
    emb_file = person_name + f'_{file_ind}.npy'
    emb_file_out_path = os.path.join(emb_file_dir, emb_file)
    np.save(emb_file_out_path, emb)
    return emb

def init_load_embs(img_emb_dir='images/img_emb'):
    """Rebuild the ind2person mapping from the saved .npy encodings."""
    persons = os.listdir(img_emb_dir)
    i = 0
    ind2person = dict()
    for oneperson in persons:
        oneperson_dir = os.path.join(img_emb_dir, oneperson)
        oneperson_list = os.listdir(oneperson_dir)
        for oneperson_j in oneperson_list:
            emb_id = i
            i += 1
            emb = np.load(os.path.join(oneperson_dir, oneperson_j))
            ind2person[emb_id] = dict(person=oneperson, emb=emb)
    return ind2person

def image_rec(image, known_face_encodings, _ind2person):
    """
    args:
        image: cv2 format (BGR)
    return:
        image: cv2 format (BGR), with a colored box and name per detected face
    """
    face_locations = face_recognition.face_locations(image)
    face_encodings = face_recognition.face_encodings(image, face_locations)
    face_names = []
    for face_encoding in face_encodings:
        name = "Unknown"
        if known_face_encodings:  # guard: nobody registered yet, np.argmin would fail on []
            # Use the known face with the smallest distance to the new face
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = _ind2person[best_match_index]['person']
                print(f"rec {name}!!")
        face_names.append(name)

    nameset = list(set(face_names))
    colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (0, 255, 255),
              (255, 255, 0), (156, 102, 31), (255, 0, 255)]
    chose_colors = random.sample(colors, len(nameset))
    name2color = {_n: chose_colors[i] for i, _n in enumerate(nameset)}
    print(name2color)

    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Draw a box around the face
        cv2.rectangle(image, (left, top), (right, bottom), name2color[name], 2)

        # Draw the name label below the face
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(image, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    return image
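These helpers define a simple on-disk layout: one photo per registration under images/ori_images/<name>/ and a matching 128-d face_recognition encoding under images/img_emb/<name>/. A sketch of the round trip, with "bob.jpg" as a hypothetical input photo:

from PIL import Image
from utils.face_rec import input_an_image, init_load_embs

emb = input_an_image(Image.open("bob.jpg"), "bob")
# writes images/ori_images/bob/bob_0.jpg and images/img_emb/bob/bob_0.npy

ind2person = init_load_embs()
# {ind: {'person': name, 'emb': 128-d array}, ...}, indices in os.listdir order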
video.py
ADDED
@@ -0,0 +1,115 @@
import face_recognition
import cv2
import numpy as np

# This is a demo of running face recognition on live video from your webcam. It's a little more
# complicated than the other example, but it includes some basic performance tweaks to make things
# run a lot faster:
#   1. Process each video frame at 1/4 resolution (though still display it at full resolution).
#   2. Only detect faces in every other frame of video.

# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from
# your webcam. OpenCV is *not* required to use the face_recognition library. It's only required if
# you want to run this specific demo.

# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)

# Load a sample picture and learn how to recognize it.
obama_image = face_recognition.load_image_file("obama.jpg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]

# Load a second sample picture and learn how to recognize it.
biden_image = face_recognition.load_image_file("biden.jpg")
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]

# Load two more sample pictures the same way.
me = face_recognition.load_image_file("me.jpg")
me_face_encoding = face_recognition.face_encodings(me)[0]

wang = face_recognition.load_image_file("wang.jpg")
wang_face_encoding = face_recognition.face_encodings(wang)[0]

# Create arrays of known face encodings and their names
known_face_encodings = [
    obama_face_encoding,
    biden_face_encoding,
    me_face_encoding,
    wang_face_encoding
]
known_face_names = [
    "Barack Obama",
    "Joe Biden",
    "me",
    "wang"
]

# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()

    # Only process every other frame of video to save time
    if process_this_frame:
        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)

        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)

        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s), using the known face
            # with the smallest distance to the new face
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_names[best_match_index]

            face_names.append(name)

    process_this_frame = not process_this_frame

    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4

        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

    # Display the resulting image
    cv2.imshow('Video', frame)

    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
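The match decision in the loop above combines two calls: compare_faces applies a distance threshold (0.6 by default), while face_distance returns the raw distances so the closest known face can be picked. The same logic in isolation, with illustrative numbers:

import numpy as np

# Euclidean distances from one probe encoding to three known encodings (illustrative).
face_distances = np.array([0.71, 0.48, 0.66])
matches = face_distances <= 0.6                # what compare_faces computes at its default tolerance
best_match_index = np.argmin(face_distances)   # -> 1
name = "known_1" if matches[best_match_index] else "Unknown"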
videofast.py
ADDED
@@ -0,0 +1,160 @@
import face_recognition
import cv2
import numpy as np
import os
import pickle

# Webcam face-recognition demo with the same performance tweaks as video.py:
#   1. Process each video frame at 1/4 resolution (though still display it at full resolution).
#   2. Only detect faces in every other frame of video.
# Known faces are loaded from the saved .npy embeddings instead of being
# re-encoded from photos on every start.


def get_emb(file_name):
    """Return the face encoding for a photo, caching it as a sibling .npy file."""
    if os.path.exists(file_name):
        file_ = face_recognition.load_image_file(file_name)
        emb = face_recognition.face_encodings(file_)[0]
        np.save(file_name.replace(".jpg", ".npy"), emb)
    else:
        # photo is gone: fall back to the cached encoding
        emb = np.load(file_name.replace(".jpg", ".npy"))
    return emb


def input_an_image(image_file, person_name, ori_img_dir='images/ori_images', img_emb_dir='images/img_emb'):
    image_file_dir = os.path.join(ori_img_dir, person_name)
    emb_file_dir = os.path.join(img_emb_dir, person_name)
    if not os.path.exists(image_file_dir):
        os.makedirs(image_file_dir)
        os.makedirs(emb_file_dir)
        file_ind = 0
    else:
        file_ind = len(os.listdir(image_file_dir))
    file_ = face_recognition.load_image_file(image_file)
    emb = face_recognition.face_encodings(file_)[0]
    emb_file = image_file.split('.')[0] + f'_{file_ind}.npy'
    emb_file_out_path = os.path.join(emb_file_dir, emb_file)
    np.save(emb_file_out_path, emb)
    return emb


def init_load_embs(img_emb_dir='images/img_emb'):
    persons = os.listdir(img_emb_dir)
    i = 0
    ind2person = dict()
    for oneperson in persons:
        oneperson_dir = os.path.join(img_emb_dir, oneperson)
        oneperson_list = os.listdir(oneperson_dir)
        for oneperson_j in oneperson_list:
            emb_id = i
            i += 1
            emb = np.load(os.path.join(oneperson_dir, oneperson_j))
            ind2person[emb_id] = dict(person=oneperson, emb=emb)
    return ind2person


if __name__ == "__main__":
    ind2person = init_load_embs()
    video_capture = cv2.VideoCapture(0)
    emb = input_an_image('youpeng.jpg', "youpeng")
    ind2person[len(ind2person)] = dict(person="youpeng", emb=emb)

    known_face_encodings = [v['emb'] for k, v in ind2person.items()]

    # Initialize some variables
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()

        # Only process every other frame of video to save time
        if process_this_frame:
            # Resize frame of video to 1/4 size for faster face recognition processing
            small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

            # Convert from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
            rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)

            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame, number_of_times_to_upsample=1)  # , model="cnn")
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)

            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s), using the known face
                # with the smallest distance to the new face
                matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
                name = "Unknown"
                face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = ind2person[best_match_index]['person']

                face_names.append(name)

        process_this_frame = not process_this_frame

        # Display the results
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

        # Display the resulting image
        cv2.imshow('Video', frame)

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
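The multiply-by-4 when drawing is the inverse of the fx=fy=0.25 resize: detection ran at quarter resolution, so every box coordinate is in quarter-scale pixels. A worked example with illustrative detector output:

# (top, right, bottom, left) at 1/4 resolution...
top, right, bottom, left = 53, 240, 153, 140
# ...maps back to original-frame pixels:
box = tuple(v * 4 for v in (top, right, bottom, left))   # (212, 960, 612, 560)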