import cv2
import numpy as np
import face_recognition
import os
import gradio as gr

# Load the reference images and derive each person's name from the file name.
path = 'images'
images = []
personNames = []
myList = os.listdir(path)
print(myList)
for cu_img in myList:
    current_Img = cv2.imread(f'{path}/{cu_img}')
    images.append(current_Img)
    personNames.append(os.path.splitext(cu_img)[0])
print(personNames)


def faceEncodings(images):
    """Compute one face encoding per reference image (assumes one face per image)."""
    encodeList = []
    for img in images:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        encode = face_recognition.face_encodings(img)[0]
        encodeList.append(encode)
    return encodeList


encodeListKnown = faceEncodings(images)
print('All Encodings Complete!!!')


def Attendance(video):
    """Scan an uploaded video and return the names of the people recognised in it."""
    cap = cv2.VideoCapture(video)
    names = []
    while True:
        ret, frame = cap.read()
        if not ret:  # stop when the video ends
            break
        # Downscale to a quarter of the size for faster detection, then convert BGR -> RGB.
        faces = cv2.resize(frame, (0, 0), None, 0.25, 0.25)
        faces = cv2.cvtColor(faces, cv2.COLOR_BGR2RGB)
        facesCurrentFrame = face_recognition.face_locations(faces)
        encodesCurrentFrame = face_recognition.face_encodings(faces, facesCurrentFrame)
        for encodeFace, faceLoc in zip(encodesCurrentFrame, facesCurrentFrame):
            matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
            faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
            matchIndex = np.argmin(faceDis)
            if matches[matchIndex]:
                name = personNames[matchIndex].upper()
                if name not in names:
                    names.append(name)
    cap.release()
    return ', '.join(names)


demo = gr.Interface(fn=Attendance,
                    inputs="video",
                    outputs="text",
                    title="Face Attendance")
demo.launch(debug=True)
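
# --- Optional extension (sketch, not part of the original script) ---
# A common next step for an attendance app is to log each recognised name with a
# timestamp. The helper below is a hypothetical sketch of that idea: the file name
# 'Attendance.csv' and the function name markAttendance are assumptions, not taken
# from the source. It could be called as markAttendance(name) right after a match
# is found inside Attendance().
import os
from datetime import datetime


def markAttendance(name, csv_path='Attendance.csv'):
    """Append `name` with the current time to a CSV file, once per name."""
    # Create the file with a header the first time it is used.
    if not os.path.exists(csv_path):
        with open(csv_path, 'w') as f:
            f.write('Name,Time\n')
    with open(csv_path, 'r+') as f:
        # Names already logged are in the first column of each existing row.
        already_logged = [line.split(',')[0] for line in f.readlines()]
        if name not in already_logged:
            f.write(f'{name},{datetime.now().strftime("%H:%M:%S")}\n')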