import streamlit as st
from PIL import Image
import face_recognition
import cv2
import numpy as np
import os
import requests

st.title("Face Recognition based attendance system")

# Load the reference photographs and build the list of known names
Images = []
classnames = []
directory = "photos"
myList = os.listdir(directory)

st.write("Photographs found in folder:")
for cls in myList:
    if os.path.splitext(cls)[1].lower() in [".jpg", ".jpeg"]:
        img_path = os.path.join(directory, cls)
        curImg = cv2.imread(img_path)
        Images.append(curImg)
        st.write(os.path.splitext(cls)[0])
        classnames.append(os.path.splitext(cls)[0])

# Compute encodings for the known faces.
# cv2.imread returns BGR while face_recognition expects RGB, so convert first.
# This assumes each reference photo contains exactly one detectable face.
encodeListknown = [
    face_recognition.face_encodings(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))[0]
    for img in Images
]

# Camera widget to take a photo of the user in question
file_name = st.camera_input("Upload image")


def add_attendance(names):
    """Send every recognized name to the attendance endpoint."""
    url = "https://ai-ml-project.glitch.me/adduserdata1"  # Change this URL to your Glitch endpoint
    success_count = 0
    for name in names:
        data = {'name': name}
        # If the endpoint reads query parameters rather than a request body,
        # use params=data instead of data=data.
        response = requests.get(url, data=data)
        if response.status_code == 200:
            success_count += 1
        else:
            st.warning(f"Failed to mark attendance for {name}")
    if success_count == len(names):
        st.success("Attendance marked for all recognized faces. Have a good day!")
    elif success_count > 0:
        st.success("Attendance marked for some faces. Check warnings for details.")
    else:
        st.warning("Attendance could not be marked for any face.")


if file_name is not None:
    test_image = Image.open(file_name)
    # The PIL image is already RGB, so no colour conversion is needed here.
    image = np.array(test_image)

    # Downscale to a quarter size so detection runs faster
    imgS = cv2.resize(image, (0, 0), None, 0.25, 0.25)

    facesCurFrame = face_recognition.face_locations(imgS)
    encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)

    # List to store recognized names for all faces in the image
    recognized_names = []

    # Check whether any faces were detected
    if len(encodesCurFrame) > 0:
        # Work on a writable copy so OpenCV can draw on it
        image = image.copy()

        for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
            matches = face_recognition.compare_faces(encodeListknown, encodeFace)
            faceDis = face_recognition.face_distance(encodeListknown, encodeFace)

            # Default to Unknown until a match is found
            name = "Unknown"

            # Pick the closest known face if any of them matched
            if True in matches:
                matchIndex = np.argmin(faceDis)
                name = classnames[matchIndex].upper()

            recognized_names.append(name)

            # Scale the face location back up (the image was resized by 0.25)
            # and draw a labelled rectangle around the face
            y1, x2, y2, x1 = faceLoc
            y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
            cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.putText(image, name, (x1 + 6, y2 - 6),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)

        # Display the image with the recognized faces marked
        st.image(image, use_column_width=True, output_format="PNG")
        st.write(f"Faces detected: {len(recognized_names)}")

        # Display recognized names and mark attendance for all of them
        st.write("Recognized Names:")
        for i, name in enumerate(recognized_names):
            st.write(f"Face {i + 1}: {name}")
        add_attendance(recognized_names)
    else:
        st.warning("No faces detected in the image. Face recognition failed.")
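
# ---------------------------------------------------------------------------
# Note: the attendance endpoint used by add_attendance() lives in a separate
# Glitch project and is not part of this script. The commented-out Flask
# sketch below is only a rough illustration of what such a receiver could
# look like; the route name, the 'name' field, and the CSV log file are
# assumptions, not the actual implementation behind
# https://ai-ml-project.glitch.me/adduserdata1.
# ---------------------------------------------------------------------------
# from datetime import datetime
# from flask import Flask, request
#
# app = Flask(__name__)
#
# @app.route("/adduserdata1", methods=["GET", "POST"])
# def add_user_data():
#     # Accept the name from query parameters, form data, or the request body
#     name = request.values.get("name", "Unknown")
#     timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
#     # Append one line per attendance record to a simple CSV log
#     with open("attendance_log.csv", "a") as f:
#         f.write(f"{name},{timestamp}\n")
#     return "OK", 200
#
# if __name__ == "__main__":
#     app.run()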