import cv2
import numpy as np
import face_recognition
import os
import gradio as gr
def faceEncodings(images):
    # Compute one encoding per reference image.
    # Assumes each reference image contains at least one detectable face.
    encodeList = []
    for img in images:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; face_recognition expects RGB
        encode = face_recognition.face_encodings(img)[0]
        encodeList.append(encode)
    return encodeList
def Attendance(text, video, image):
    names = []
    path = text  # folder of reference images, one per person
    images = []
    personNames = []
    myList = os.listdir(path)
    print(myList)
    for cu_img in myList:
        current_Img = cv2.imread(f'{path}/{cu_img}')
        images.append(current_Img)
        personNames.append(os.path.splitext(cu_img)[0])  # file name (minus extension) is the person's name
    print(personNames)
    encodeListKnown = faceEncodings(images)
    print('All Encodings Complete!!!')
    if video is not None:
        cap = cv2.VideoCapture(video)
        while True:
            ret, frame = cap.read()
            if not ret:  # end of video reached
                break
            rgbFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            facesCurrentFrame = face_recognition.face_locations(rgbFrame)
            encodesCurrentFrame = face_recognition.face_encodings(rgbFrame, facesCurrentFrame)
            for encodeFace, faceLoc in zip(encodesCurrentFrame, facesCurrentFrame):
                matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
                faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
                matchIndex = np.argmin(faceDis)
                if matches[matchIndex]:
                    name = personNames[matchIndex].upper()
                    if name not in names:  # record each person only once
                        names.append(name)
        cap.release()
        return ' '.join(names)
    else:
        # Gradio passes the uploaded image as an RGB numpy array,
        # so it can be fed to face_recognition without colour conversion.
        facesCurrentFrame = face_recognition.face_locations(image)
        encodesCurrentFrame = face_recognition.face_encodings(image, facesCurrentFrame)
        for encodeFace, faceLoc in zip(encodesCurrentFrame, facesCurrentFrame):
            matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
            faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
            matchIndex = np.argmin(faceDis)
            if matches[matchIndex]:
                name = personNames[matchIndex].upper()
                if name not in names:
                    names.append(name)
        return ' '.join(names)
demo = gr.Interface(
    fn=Attendance,
    inputs=["text", "video", "image"],
    outputs="text",
    title="Face Attendance",
)
demo.launch(debug=True)
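
# Usage sketch (assumptions: the Space has face_recognition, opencv-python, numpy and
# gradio installed, and the text box is given a path to a folder of reference images
# named <person>.<ext>): enter the folder path, then upload either a video or a single
# image; the output box lists the recognised names separated by spaces.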