Spaces:
Runtime error
Runtime error
ANSELME
committed on
Commit
•
b01aff7
1
Parent(s):
d44d05f
Upload 18 files
Browse files- Attendance.csv +19 -0
- Bateau.jpg +0 -0
- Bill Gates.png +0 -0
- Elon Musk.jpg +0 -0
- Jack Ma.jpg +0 -0
- Jeff Bezos.jpg +0 -0
- Mark Zuckerberg.jpg +0 -0
- Preparing_local.py +101 -0
- README.md +13 -13
- Ruth Oteng.jpg +0 -0
- Ruth.png +0 -0
- Ssembuya Abdurahumani.png +0 -0
- Ssembuya.jpg +0 -0
- Training.py +24 -0
- Warren Buffett.jpg +0 -0
- app.py +155 -0
- encoded_faces.pickle +3 -0
- requirements.txt +5 -0
Attendance.csv
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Name,Arrive_Time,Date,Late_penalty
|
2 |
+
,,,
|
3 |
+
Elon Musk,18:54:00,4/5/2023,-5
|
4 |
+
Bill Gates,21:00:34,4/25/2023,-5
|
5 |
+
Jeff Bezos,21:02:35,4/25/2023,-5
|
6 |
+
ruth,19:58:52,4/26/2023,-5
|
7 |
+
Abdul,15:42:55,5/3/2023,-5
|
8 |
+
Bill Gates,15:25:16,5/6/2023,-2
|
9 |
+
Mark Zuckerberg,15:45:01,5/6/2023,-2
|
10 |
+
Ssembuya Abdurahumani,15:45:14,5/6/2023,-2
|
11 |
+
Ruth Oteng,12:00:39,5/8/2023,-2
|
12 |
+
|
13 |
+
Ssembuya Abdurahumani,10:09:42,2023/05/10,-2
|
14 |
+
Ruth Oteng,12:50:42,2023/05/11,-2
|
15 |
+
Ruth,13:04:57,2023/05/11,-2
|
16 |
+
Ruth Oteng,15:37:23,2023/05/15,-2
|
17 |
+
Ssembuya Abdurahumani,15:38:04,2023/05/15,-2
|
18 |
+
Ruth,15:50:56,2023/05/15,-2
|
19 |
+
Mark Zuckerberg,15:53:13,2023/05/15,-2
|
Bateau.jpg
ADDED
Bill Gates.png
ADDED
Elon Musk.jpg
ADDED
Jack Ma.jpg
ADDED
Jeff Bezos.jpg
ADDED
Mark Zuckerberg.jpg
ADDED
Preparing_local.py
ADDED
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
|
2 |
+
import face_recognition
|
3 |
+
import os
|
4 |
+
import time
|
5 |
+
import pandas as pd
|
6 |
+
import streamlit as st
|
7 |
+
|
8 |
+
# Declaring variables
path = "db"  # folder holding one reference image per known person
scale = 2    # detection runs on a half-size image; face boxes are scaled back up by this factor
|
11 |
+
|
12 |
+
def late_penalty(arrive_time, cutoff_hour=9):
    """Return the attendance penalty for a given arrival time.

    Parameters
    ----------
    arrive_time : str
        Arrival time formatted as ``"HH:MM:SS"`` (24-hour clock).
    cutoff_hour : int, optional
        Latest hour that still counts as on time (default 9).  Any
        arrival whose hour is strictly greater is penalised, so with
        the default, 09:59:59 is on time and 10:00:00 is late.

    Returns
    -------
    int
        ``0`` when on time, ``-2`` when late.
    """
    penalty = 0
    # Only the hour component is inspected (minutes/seconds ignored),
    # matching the original behaviour.
    if int(arrive_time[0:2]) > cutoff_hour:
        penalty = -2
    return penalty
|
17 |
+
|
18 |
+
def markattendance(person_name, attendance_file):
    """Record today's attendance for *person_name* and return the sheet.

    The CSV (columns: Name,Arrive_Time,Date,Late_penalty) is scanned for
    rows carrying today's date; if *person_name* has no row for today, a
    new row with the current time and the computed late penalty is
    appended.

    Parameters
    ----------
    person_name : str
        Name of the recognised person.
    attendance_file : object
        Object with a ``.name`` attribute that is a path to the CSV on
        disk (e.g. a Streamlit ``UploadedFile`` — presumably the file
        also exists locally under that name; TODO confirm on the
        deployment target).

    Returns
    -------
    pandas.DataFrame
        The full attendance sheet after the (possible) update.
    """
    now = time.localtime()
    date = time.strftime("%Y/%m/%d", now)

    with open(attendance_file.name, 'r+') as f:
        lines = f.readlines()

        # Collect the names already marked present today.
        name_list = []
        for line in lines:
            entry = line.split(',')
            if len(entry) > 1:
                if entry[2] == date:
                    name_list.append(entry[0])

        # At most one entry per person per day.
        if person_name not in name_list:
            arrive_time = time.strftime("%H:%M:%S", now)
            penalty = late_penalty(arrive_time)
            f.write(f'\n{person_name},{arrive_time},{date},{penalty}')

    # Read back via the path so no file handle is leaked (the original
    # opened a second handle here and never closed it).
    df = pd.read_csv(attendance_file.name, encoding='utf-8')
    return df
|
40 |
+
|
41 |
+
|
42 |
+
def prepare_test_img(test_img):
    """Load an uploaded image and locate/encode every face in it.

    The image is downscaled to half size before detection to speed up
    the HOG face locator; callers are expected to scale the returned
    boxes back up to full-image coordinates.

    Returns a tuple ``(full_image, face_encodings, face_locations)``
    where the encodings and locations refer to the half-size image.
    """
    full_image = face_recognition.load_image_file(test_img)
    half_image = cv2.resize(full_image, (0, 0), None, 0.5, 0.5)

    locations = face_recognition.face_locations(half_image, model="hog")
    encodings = face_recognition.face_encodings(half_image)

    return full_image, encodings, locations
|
50 |
+
|
51 |
+
|
52 |
+
def test(encoded_tests, face_test_locations, test_img, encoded_trains, attendance_file):
    """Match detected faces against trained encodings and mark attendance.

    For each face found in the (half-size) test image, the first training
    encoding within tolerance is taken as the match; the matched person's
    name is drawn on *test_img* (modified in place) and their attendance
    is recorded.  Unmatched faces are labelled "UNKNOWN".

    Parameters
    ----------
    encoded_tests : list
        Face encodings detected in the test image.
    face_test_locations : list
        (top, right, bottom, left) boxes in half-size coordinates.
    test_img : numpy.ndarray
        Full-size image the boxes and labels are drawn onto.
    encoded_trains : list
        Known encodings, ordered like the files in the ``db`` folder.
    attendance_file : object
        Object with a ``.name`` attribute (path to the attendance CSV).

    Returns
    -------
    pandas.DataFrame or str
        The updated attendance sheet, or ``"No Faces Found"`` when the
        image contained no faces at all.
    """
    images = os.listdir(path)
    name_indices = []
    df = "No Faces Found"  # for handling an error when no faces detected

    for encoded_test, face_test_location in zip(encoded_tests, face_test_locations):
        results = face_recognition.compare_faces(encoded_trains, encoded_test, tolerance=0.49)
        # face_distances = face_recognition.face_distance(encoded_trains,encoded_test)

        # Boxes were detected on a half-size image, so scale them back up.
        top_left = (face_test_location[3] * scale, face_test_location[0] * scale)
        bottom_right = (face_test_location[1] * scale, face_test_location[2] * scale)

        if True in results:
            # First match wins.  Encodings are ordered like
            # os.listdir(path), so the index maps directly to a file name
            # (the original looped over enumerate(images) to find it).
            name_index = results.index(True)
            name_indices.append(name_index)

            person_name = images[name_index].split(".")[0]
            cv2.rectangle(test_img, top_left, bottom_right, (255, 0, 255), 2)
            cv2.rectangle(test_img, bottom_right, (top_left[0], bottom_right[1] + 30), (255, 0, 255), cv2.FILLED)
            cv2.putText(test_img, person_name, (top_left[0] + 6, bottom_right[1] + 25), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 1)
            df = markattendance(person_name, attendance_file)
        else:
            cv2.rectangle(test_img, top_left, bottom_right, (255, 0, 255), 2)
            cv2.rectangle(test_img, bottom_right, (top_left[0], bottom_right[1] + 30), (255, 0, 255), cv2.FILLED)
            cv2.putText(test_img, "UNKNOWN", (top_left[0] + 6, bottom_right[1] + 25), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 1)
            # Still return the current sheet; read via the path so the
            # handle is closed (the original leaked an open file here).
            df = pd.read_csv(attendance_file.name, encoding='utf-8')

    return df
|
95 |
+
|
96 |
+
|
97 |
+
|
98 |
+
|
99 |
+
|
100 |
+
|
101 |
+
|
README.md
CHANGED
@@ -1,13 +1,13 @@
|
|
1 |
-
---
|
2 |
-
title:
|
3 |
-
emoji:
|
4 |
-
colorFrom:
|
5 |
-
colorTo:
|
6 |
-
sdk:
|
7 |
-
sdk_version:
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license:
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
1 |
+
---
|
2 |
+
title: FaceRecognition
|
3 |
+
emoji: 📊
|
4 |
+
colorFrom: pink
|
5 |
+
colorTo: gray
|
6 |
+
sdk: streamlit
|
7 |
+
sdk_version: 1.21.0
|
8 |
+
app_file: app.py
|
9 |
+
pinned: false
|
10 |
+
license: cc
|
11 |
+
---
|
12 |
+
|
13 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
Ruth Oteng.jpg
ADDED
Ruth.png
ADDED
Ssembuya Abdurahumani.png
ADDED
Ssembuya.jpg
ADDED
Training.py
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
|
2 |
+
import face_recognition
|
3 |
+
import os
|
4 |
+
# import streamlit as st
|
5 |
+
# import pickle
|
6 |
+
# Declaring variables
path = "db"  # folder of training images, one face per file; file name = person name
|
8 |
+
|
9 |
+
def training(path):
    """Encode every face image in *path* and return the encodings.

    Each file in the directory must contain exactly one detectable face;
    the file name (without extension) is used elsewhere as the person's
    name.

    Parameters
    ----------
    path : str
        Directory holding one image per person.

    Returns
    -------
    tuple
        ``(encodings, images)`` — the list of 128-d face encodings and
        the list of image file names, both ordered like
        ``os.listdir(path)``.
    """
    images = os.listdir(path)
    encoded_trains = []

    for image in images:
        # Bug fix: the original hard-coded "db/" here, silently ignoring
        # the *path* parameter.
        train_img = face_recognition.load_image_file(os.path.join(path, image))
        # NOTE(review): load_image_file already returns RGB, so this swap
        # yields BGR.  Kept as-is because the shipped encoded_faces.pickle
        # was produced this way — changing it would invalidate the saved
        # encodings; confirm before removing.
        train_img = cv2.cvtColor(train_img, cv2.COLOR_BGR2RGB)
        encoded_trains.append(face_recognition.face_encodings(train_img)[0])
    return encoded_trains, images
|
18 |
+
|
19 |
+
|
20 |
+
# encoded_trains, images = training(path)
|
21 |
+
# st.write(images)
|
22 |
+
# output_file = 'encoded_faces.pickle'
|
23 |
+
# with open(output_file, 'wb') as f_out:
|
24 |
+
# pickle.dump(encoded_trains, f_out)
|
Warren Buffett.jpg
ADDED
app.py
ADDED
@@ -0,0 +1,155 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import time
|
3 |
+
import cv2
|
4 |
+
import pickle
|
5 |
+
import face_recognition
|
6 |
+
from Preparing_local import prepare_test_img, test
|
7 |
+
from PIL import Image
|
8 |
+
import os
|
9 |
+
|
10 |
+
|
11 |
+
# Define the path to the 'db' directory
db_folder = 'db'

# Create the 'db' directory if it doesn't exist
if not os.path.exists(db_folder):
    os.makedirs(db_folder)


# Wall-clock start time; used later to report the elapsed processing time.
t0= time.time()
# print("Hello")
# Declaring variables
path = "db"  # duplicates db_folder above; both name the face-image folder
|
23 |
+
|
24 |
+
|
25 |
+
def main():
    """Streamlit entry point: attendance management via face recognition.

    Sidebar modes: "Attend from image", "Attend using camera", "Training"
    and "Add New Student".  (An "Attend Live" branch exists below but is
    unreachable — it is not offered in the selectbox.)
    """
    # Loading the mode
    #@st.cache
    def load_model():
        # Unpickle the face encodings produced by the Training mode.
        with open ('encoded_faces.pickle', 'rb') as f_in:
            encoded_trains = pickle.load(f_in)
        return encoded_trains
    encoded_trains = load_model()

    # Start of the project
    st.title("Attendance Management System Using Face Recognition")
    st.sidebar.title("Take Attendance")
    app_mode = st.sidebar.selectbox("Choose Mode",
    ["Attend from image", "Attend using camera", "Training","Add New Student"])


    if app_mode == "Attend from image":
        # Both the attendance CSV and the photo must be supplied before
        # anything runs.
        attendance_file = st.file_uploader("Choose attendance file",type =['csv'])
        uploaded_file = st.file_uploader("Upload a picture of a student to mark the attendance", type=['jpg', 'jpeg', 'png'])
        if attendance_file is not None and uploaded_file is not None:

            test_img, encoded_tests, face_test_locations = prepare_test_img(uploaded_file)
            df = test(encoded_tests, face_test_locations, test_img, encoded_trains, attendance_file)
            # Elapsed since module import (t0 is set at module level), not
            # since the upload — NOTE(review): this overstates per-request time.
            t1 = time.time() - t0

            st.write("Time elapsed: ", t1)
            # test_img = cv2.resize(test_img,(0,0),None,0.50,0.50)
            st.image(test_img)
            st.write(df)


    elif app_mode == "Attend using camera":
        # Same flow as "Attend from image" but the photo comes from the webcam.
        attendance_file = st.file_uploader("Choose attendance file",type =['csv'])
        picture = st.camera_input("Take a picture")
        if picture is not None and attendance_file is not None:

            test_img, encoded_tests, face_test_locations = prepare_test_img(picture)
            df = test(encoded_tests, face_test_locations, test_img, encoded_trains, attendance_file)
            t1 = time.time() - t0

            st.write("Time elapsed: ", t1)
            #test_img = cv2.resize(test_img,(0,0),None,0.50,0.50)
            st.image(test_img)
            st.write(df)


    elif app_mode == "Training":
        st.subheader('Training Steps:')
        st.markdown("1. Get a photo of every Student with **only one face** in the picture.")
        st.markdown('2. Put all the photos in the **db** folder')
        st.markdown("3. Press **Train The Model** Button")

        if st.button("Train The Model"):
            # Re-encode every image in db/ and persist the encodings so
            # load_model() picks them up on the next run.
            import Training
            encoded_trains, images = Training.training(path)
            st.write(images)
            st.write(len(encoded_trains))
            output_file = 'encoded_faces.pickle'

            with open(output_file, 'wb') as f_out:
                pickle.dump(encoded_trains, f_out)


    elif app_mode == "Attend Live":
        # NOTE(review): unreachable — "Attend Live" is not among the
        # selectbox options declared above.
        st.title("Webcam Live Feed")
        attendance_file = st.file_uploader("Choose attendance file",type =['csv'])

        if attendance_file is not None:
            run = st.checkbox('Run')
            FRAME_WINDOW = st.image([])
            # Camera index 2 is machine-specific — TODO confirm on target host.
            camera = cv2.VideoCapture(2)

            while run:
                _, test_img = camera.read()
                test_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2RGB)
                test_img_small = cv2.resize(test_img,(0,0),None,0.5,0.5)

                face_test_locations = face_recognition.face_locations(test_img_small, model = "hog")
                encoded_tests = face_recognition.face_encodings(test_img_small)
                df = test(encoded_tests, face_test_locations, test_img, encoded_trains, attendance_file)
                #st.image(test_img)
                FRAME_WINDOW.image(test_img)
                #st.write(df)


    elif app_mode == "Add New Student":
        st.title("Register Here")
        Name = st.text_input('Enter Your Name:')
        def load_image(image_file):
            img = Image.open(image_file)
            return img

        image_file = st.camera_input("Take a picture")


        if image_file is not None:
            # TO See details
            file_details = Name
            st.write("Your Name:"+file_details)
            st.image(load_image(image_file), width=250)

            #Saving upload
            # The captured frame is written straight into the training
            # folder under the typed name.
            with open(os.path.join("db",Name+'.jpg'),"wb") as f:
                f.write((image_file).getbuffer())

            st.success("File Saved successfully")
            st.write("Now,Please click on Register Button")

            if st.button("Register"):
                # Same retraining flow as the "Training" mode.
                import Training
                encoded_trains, images = Training.training(path)
                st.write(images)
                st.write(len(encoded_trains))
                output_file = 'encoded_faces.pickle'

                with open(output_file, 'wb') as f_out:
                    pickle.dump(encoded_trains, f_out)


        else:
            # Shown until a picture has been taken.
            st.write('Stopped')


if __name__=='__main__':
    main()
|
encoded_faces.pickle
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:6b89a272db53287dde17f1d8363fa30b84a7df72ab8ba3c84740ad257c2a2b48
|
3 |
+
size 10691
|
requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
dlib
|
2 |
+
pandas
|
3 |
+
numpy
|
4 |
+
face_recognition
|
5 |
+
opencv-python
|