awacke1 committed
Commit 552c52a
Parent: 3da5683

Create app.py

Files changed (1)
  1. app.py +122 -0
app.py ADDED
@@ -0,0 +1,122 @@
+ import streamlit as st
+ import cv2
+ import numpy as np
+ import datetime
+ import os
+ import time
+ import base64
+ import re
+ import glob
+ from camera_input_live import camera_input_live
+ import face_recognition
+
+ st.set_page_config(layout="wide")
+
+ def get_image_count():
+     # Simple counter holder used when naming saved snapshots
+     return {'count': 0}
+
+ def save_image(image, image_count):
+     # Decode the captured frame and write it to disk as a timestamped PNG
+     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+     filename = f"captured_image_{timestamp}_{image_count['count']}.png"
+     image_count['count'] += 1
+     bytes_data = image.getvalue()
+     cv2_img = cv2.imdecode(np.frombuffer(bytes_data, np.uint8), cv2.IMREAD_COLOR)
+     cv2.imwrite(filename, cv2_img)
+     return filename
+
+ def get_image_base64(image_path):
+     # Read an image file and return its base64 encoding for inline HTML display
+     with open(image_path, "rb") as image_file:
+         return base64.b64encode(image_file.read()).decode()
+
+ def process_line(line):
+     # Replace chord names (A-G, optional #/b and minor "m") with <img> tags pointing at matching .png files
+     if re.search(r'\b[A-G][#b]?m?\b', line):
+         line = re.sub(r'\b([A-G][#b]?m?)\b', r"<img src='\1.png' style='height:20px;'>", line)
+     return line
+
+ def process_sheet(sheet):
+     # Process a sheet line by line and join the results with HTML line breaks
+     processed_lines = []
+     for line in sheet.split('\n'):
+         processed_line = process_line(line)
+         processed_lines.append(processed_line)
+     return '<br>'.join(processed_lines)
+
+ def main():
+     col1, col2 = st.columns([2, 3])
+
+     with col1:
+         st.markdown("✨ Magic Lens: Real-Time Camera Stream 🌈")
+
+         snapshot_interval = st.slider("Snapshot Interval (seconds)", 1, 10, 5)
+         image_placeholder = st.empty()
+
+         if 'captured_images' not in st.session_state:
+             st.session_state['captured_images'] = []
+         if 'last_captured' not in st.session_state:
+             st.session_state['last_captured'] = time.time()
+
+         image = camera_input_live()
+         if image is not None:
+             # Decode the live frame and convert from OpenCV's BGR order to RGB for face_recognition
+             rgb_image = cv2.cvtColor(cv2.imdecode(np.frombuffer(image.getvalue(), np.uint8), cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
+
+             face_locations = face_recognition.face_locations(rgb_image)
+             face_encodings = face_recognition.face_encodings(rgb_image, face_locations)
+
+             # Load the reference encoding if a known face has been registered
+             if os.path.isfile("known_face.jpg"):
+                 known_image = face_recognition.load_image_file("known_face.jpg")
+                 known_encoding = face_recognition.face_encodings(known_image)[0]
+             else:
+                 known_encoding = None
+
+             # Outline each detected face; a face matching the registered encoding gets a green box and label
+             for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
+                 if known_encoding is not None:
+                     matches = face_recognition.compare_faces([known_encoding], face_encoding)
+
+                     if True in matches:
+                         cv2.rectangle(rgb_image, (left, top), (right, bottom), (0, 255, 0), 2)
+                         cv2.putText(rgb_image, "Known Face", (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
+                     else:
+                         cv2.rectangle(rgb_image, (left, top), (right, bottom), (0, 0, 255), 2)
+                 else:
+                     cv2.rectangle(rgb_image, (left, top), (right, bottom), (255, 0, 0), 2)
+
+             bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
+             image_placeholder.image(bgr_image, channels="BGR")
+
+             # Save a snapshot once per interval and remember it in session state
+             if time.time() - st.session_state['last_captured'] > snapshot_interval:
+                 image_count = get_image_count()
+                 filename = save_image(image, image_count)
+                 st.session_state['captured_images'].append(filename)
+                 st.session_state['last_captured'] = time.time()
+
+         if st.button("Register Known Face"):
+             if image is not None:
+                 cv2_img = cv2.imdecode(np.frombuffer(image.getvalue(), np.uint8), cv2.IMREAD_COLOR)
+                 cv2.imwrite("known_face.jpg", cv2_img)
+                 st.success("Known face registered successfully!")
+
+         # Show captured snapshots in the sidebar as inline base64 thumbnails
+         sidebar_html = "<div style='display:flex;flex-direction:column;'>"
+         for img_file in st.session_state['captured_images']:
+             image_base64 = get_image_base64(img_file)
+             sidebar_html += f"<img src='data:image/png;base64,{image_base64}' style='width:100px;'><br>"
+         sidebar_html += "</div>"
+         st.sidebar.markdown("## Captured Images")
+         st.sidebar.markdown(sidebar_html, unsafe_allow_html=True)
+
+         st.markdown(f"<script>setInterval(function() {{ document.getElementById('timer').innerHTML = new Date().toLocaleTimeString(); }}, 1000);</script><div>Current Time: <span id='timer'></span></div>", unsafe_allow_html=True)
+
+     with col2:
+         st.markdown("## 🎬 Action! Real-Time Camera Stream Highlights 📽️")
+
+         all_files = [f for f in glob.glob("*.png") if ' by ' in f]
+         selected_file = st.selectbox("Choose a Dataset:", all_files)
+
+         if selected_file:
+             with open(selected_file, 'r', encoding='utf-8') as file:
+                 sheet = file.read()
+             st.markdown(process_sheet(sheet), unsafe_allow_html=True)
+
+     # Rerun the script so the live view keeps refreshing between snapshots
+     if 'last_captured' in st.session_state and time.time() - st.session_state['last_captured'] > snapshot_interval:
+         st.experimental_rerun()
+
+ if __name__ == "__main__":
+     main()
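
The commit adds only app.py. To run it, the imports above imply a requirements file roughly like the sketch below; the package names are assumptions inferred from the import statements, not part of this commit, and face_recognition additionally expects dlib (and cmake) to be installable in the environment.

streamlit
opencv-python-headless
numpy
camera-input-live
face_recognition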