import streamlit as st
from PIL import Image
import face_recognition
import cv2
import numpy as np
import requests
import os
st.title("AIMLJan24 - Face Recognition")
# Build the gallery of known faces from the photos folder
Images = []      # images of known people
classnames = []  # matching names (taken from the file names)
directory = "photos"
myList = os.listdir(directory)

st.write("Photographs found in folder:")
for cls in myList:
    if os.path.splitext(cls)[1] in [".jpg", ".jpeg"]:
        img_path = os.path.join(directory, cls)
        # cv2.imread returns BGR; convert to RGB, which face_recognition expects
        curImg = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
        Images.append(curImg)
        st.write(os.path.splitext(cls)[0])
        classnames.append(os.path.splitext(cls)[0])
# One face encoding per known image (assumes each photo contains exactly one visible face)
encodeListknown = [face_recognition.face_encodings(img)[0] for img in Images]
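# NOTE: the one-liner above raises IndexError if any photo contains no
# detectable face, because face_encodings() returns an empty list in that case.
# A more defensive variant (a sketch, not part of the original app) would skip
# such photos while keeping encodings and names aligned:
#
#   encodeListknown, validNames = [], []
#   for img, cls in zip(Images, classnames):
#       encs = face_recognition.face_encodings(img)
#       if encs:
#           encodeListknown.append(encs[0])
#           validNames.append(cls)
#       else:
#           st.warning(f"No face found in {cls}; skipping it.")
#   classnames = validNames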
# Camera widget to capture the person to be recognised
file_name = st.camera_input("Take a picture")  # alternative: st.file_uploader("Upload image")
# Post the recognised name (with a hard-coded roll number) to the remote database
def update_data(name):
    url = "https://aimljan24f1.glitch.me/adduserdata"
    data = {'rollno': '222', 'name': name}
    response = requests.post(url, data=data)
    if response.status_code == 200:
        st.success("Data updated on: " + url)
    else:
        st.warning("Data not updated")
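# NOTE: requests.post() has no default timeout and raises if the host is
# unreachable, which would crash the app mid-run. A more robust call
# (a sketch, not part of the original app) could add a timeout and catch
# network errors:
#
#   try:
#       response = requests.post(url, data=data, timeout=10)
#   except requests.exceptions.RequestException as err:
#       st.warning(f"Could not reach {url}: {err}")
#       return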
if file_name is not None:
    col1, col2 = st.columns(2)

    test_image = Image.open(file_name)
    image = np.asarray(test_image)

    # Work on a quarter-size copy to speed up detection; the PIL image is
    # already RGB, which is what face_recognition expects, so no colour
    # conversion is needed.
    imgS = cv2.resize(image, (0, 0), None, 0.25, 0.25)

    facesCurFrame = face_recognition.face_locations(imgS)
    encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)

    name = "Unknown"     # default for unrecognised faces
    match_found = False  # becomes True once any known face matches
    # Run recognition only if at least one face was detected
    if len(encodesCurFrame) > 0:
        # Draw on a copy so the original camera frame stays untouched
        image_copy = image.copy()

        for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
            # Compare the captured face against every known encoding
            matches = face_recognition.compare_faces(encodeListknown, encodeFace)
            faceDis = face_recognition.face_distance(encodeListknown, encodeFace)
            matchIndex = np.argmin(faceDis)

            if matches[matchIndex]:
                name = classnames[matchIndex].upper()
                match_found = True

            # Scale the box back up: detection ran on a quarter-size image
            y1, x2, y2, x1 = faceLoc
            y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4

            cv2.rectangle(image_copy, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.rectangle(image_copy, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
            cv2.putText(image_copy, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)

        # Update the database (note: this runs even if the face stayed "Unknown")
        update_data(name)

        st.image(image_copy, use_column_width=True, output_format="PNG")
    else:
        st.warning("No faces detected in the image. Face recognition failed.")
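# To run this app locally (assuming the script is saved as app.py and a
# photos/ folder with one clearly visible face per image sits next to it):
#   streamlit run app.py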