import streamlit as st
import cv2
import numpy as np
import datetime
import os
import time
import base64
import re
import glob
from camera_input_live import camera_input_live
import face_recognition
# Use the browser's full width for the app layout.
st.set_page_config(layout="wide")
def get_image_count():
    """Return the persistent snapshot-counter dict.

    The counter lives in ``st.session_state`` so the ``'count'`` value
    survives Streamlit's script re-runs; ``save_image`` increments it
    in place.  The previous implementation returned a fresh
    ``{'count': 0}`` on every call, so the count never advanced and two
    snapshots taken within the same second produced identical
    filenames, silently overwriting each other.

    Returns:
        dict: a mutable ``{'count': int}`` shared across re-runs.
    """
    if 'image_count' not in st.session_state:
        st.session_state['image_count'] = {'count': 0}
    return st.session_state['image_count']
def save_image(image, image_count):
    """Decode a camera frame and write it to disk as a PNG.

    Args:
        image: uploaded-file-like object exposing ``getvalue()`` that
            yields the encoded frame bytes.
        image_count: mutable ``{'count': int}`` dict; the current count
            is embedded in the filename, then incremented in place.

    Returns:
        str: the filename the frame was written to.
    """
    stamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"captured_image_{stamp}_{image_count['count']}.png"
    image_count['count'] += 1
    raw = np.frombuffer(image.getvalue(), np.uint8)
    frame = cv2.imdecode(raw, cv2.IMREAD_COLOR)
    cv2.imwrite(filename, frame)
    return filename
def get_image_base64(image_path):
    """Read the file at *image_path* and return its base64 text encoding."""
    with open(image_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode()
def process_line(line):
    """Strip chord symbols (A-G, optional #/b accidental, optional minor 'm')
    from *line*; text without chords is returned unchanged.

    Note: ``re.sub`` already returns the input untouched when the pattern
    does not match, so no separate ``re.search`` guard is needed.
    """
    return re.sub(r'\b([A-G][#b]?m?)\b', r"", line)
def process_sheet(sheet):
    """Apply ``process_line`` to every line of *sheet* and rejoin with newlines.

    Fixes a SyntaxError in the original: the ``'\\n'`` join separator was a
    string literal broken across two physical source lines
    (``return '`` / ``'.join(...)``), which cannot parse.

    Args:
        sheet: multi-line chord/lyric text.

    Returns:
        str: the sheet with chord symbols stripped from each line.
    """
    processed_lines = [process_line(line) for line in sheet.split('\n')]
    return '\n'.join(processed_lines)
def main():
col1, col2 = st.columns([2, 3])
with col1:
st.markdown("✨ Magic Lens: Real-Time Camera Stream 🌈")
snapshot_interval = st.slider("Snapshot Interval (seconds)", 1, 10, 5)
image_placeholder = st.empty()
if 'captured_images' not in st.session_state:
st.session_state['captured_images'] = []
if 'last_captured' not in st.session_state:
st.session_state['last_captured'] = time.time()
image = camera_input_live()
if image is not None:
rgb_image = cv2.cvtColor(cv2.imdecode(np.frombuffer(image.getvalue(), np.uint8), cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
face_locations = face_recognition.face_locations(rgb_image)
face_encodings = face_recognition.face_encodings(rgb_image, face_locations)
if os.path.isfile("known_face.jpg"):
known_image = face_recognition.load_image_file("known_face.jpg")
known_encoding = face_recognition.face_encodings(known_image)[0]
#known_encoding = face_recognition.face_encodings(known_image)
else:
known_encoding = None
for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
if known_encoding is not None:
matches = face_recognition.compare_faces([known_encoding], face_encoding)
if True in matches:
cv2.rectangle(rgb_image, (left, top), (right, bottom), (0, 255, 0), 2)
cv2.putText(rgb_image, "Known Face", (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
else:
cv2.rectangle(rgb_image, (left, top), (right, bottom), (0, 0, 255), 2)
else:
cv2.rectangle(rgb_image, (left, top), (right, bottom), (255, 0, 0), 2)
bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
image_placeholder.image(bgr_image, channels="BGR")
if time.time() - st.session_state['last_captured'] > snapshot_interval:
image_count = get_image_count()
filename = save_image(image, image_count)
st.session_state['captured_images'].append(filename)
st.session_state['last_captured'] = time.time()
if st.button("Register Known Face"):
if image is not None:
cv2_img = cv2.imdecode(np.frombuffer(image.getvalue(), np.uint8), cv2.IMREAD_COLOR)
cv2.imwrite("known_face.jpg", cv2_img)
st.success("Known face registered successfully!")
sidebar_html = "