Martlgap committed
Commit
87c0d6a
1 Parent(s): 7d62c3d

minimal working example

Files changed (5)
  1. .DS_Store +0 -0
  2. .gitignore +1 -0
  3. app.py +5 -225
  4. app_bak.py +299 -0
  5. tools/webcam.py +2 -33
.DS_Store CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
 
.gitignore CHANGED
@@ -135,3 +135,4 @@ dmypy.json
 
 # MacOS
 .DS_Store
+.streamlit/
app.py CHANGED
@@ -1,13 +1,5 @@
 import streamlit as st
-import streamlit_toggle as tog
 import time
-import numpy as np
-import cv2
-from tools.annotation import draw_mesh, draw_landmarks, draw_bounding_box, draw_text
-from tools.alignment import align_faces
-from tools.identification import load_identification_model, inference, identify
-from tools.utils import show_images, show_faces, rgb
-from tools.detection import load_detection_model, detect_faces
 from tools.webcam import init_webcam
 import logging
 
@@ -20,126 +12,12 @@ logging.basicConfig(level=logging.ERROR)
 st.set_page_config(layout="wide")
 
 
-# Initialize the Face Detection and Identification Models
-detection_model = load_detection_model(max_faces=2, detection_confidence=0.5, tracking_confidence=0.9)
-identification_model = load_identification_model(name="MobileNet")
-
-
-# Gallery Processing
-@st.cache_data
-def gallery_processing(gallery_files):
-    """Process the gallery images (Complete Face Recognition Pipeline)
-
-    Args:
-        gallery_files (_type_): Files uploaded by the user
-
-    Returns:
-        _type_: Gallery Images, Gallery Embeddings, Gallery Names
-    """
-    gallery_images, gallery_embs, gallery_names = [], [], []
-    if gallery_files is not None:
-        for file in gallery_files:
-            file_bytes = np.asarray(bytearray(file.read()), dtype=np.uint8)
-            img = cv2.cvtColor(
-                cv2.imdecode(file_bytes, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB
-            )
-            gallery_names.append(
-                file.name.split(".jpg")[0].split(".png")[0].split(".jpeg")[0]
-            )
-            detections = detect_faces(img, detection_model)
-            aligned_faces = align_faces(img, np.asarray([detections[0]]))
-            gallery_images.append(aligned_faces[0])
-            gallery_embs.append(inference(aligned_faces, identification_model)[0])
-    return gallery_images, gallery_embs, gallery_names
-
-
-class SideBar:
-    """A class to handle the sidebar
-    """
-    def __init__(self):
-        with st.sidebar:
-            st.markdown("# Preferences")
-            self.on_face_recognition = tog.st_toggle_switch(
-                "Face Recognition", key="activate_face_rec", default_value=True, active_color=rgb(255, 75, 75), track_color=rgb(50, 50, 50)
-            )
-
-            st.markdown("---")
-
-            st.markdown("## Webcam")
-            self.resolution = st.selectbox(
-                "Webcam Resolution",
-                [(1920, 1080), (1280, 720), (640, 360)],
-                index=2,
-            )
-            st.markdown("To change webcam resolution: Please refresh page and select resolution before starting webcam stream.")
-
-            st.markdown("---")
-            st.markdown("## Face Detection")
-            self.max_faces = st.number_input(
-                "Maximum Number of Faces", value=2, min_value=1
-            )
-            self.detection_confidence = st.slider(
-                "Min Detection Confidence", min_value=0.0, max_value=1.0, value=0.5
-            )
-            self.tracking_confidence = st.slider(
-                "Min Tracking Confidence", min_value=0.0, max_value=1.0, value=0.9
-            )
-            switch1, switch2 = st.columns(2)
-            with switch1:
-                self.on_bounding_box = tog.st_toggle_switch(
-                    "Show Bounding Box", key="show_bounding_box", default_value=True, active_color=rgb(255, 75, 75), track_color=rgb(50, 50, 50)
-                )
-            with switch2:
-                self.on_five_landmarks = tog.st_toggle_switch(
-                    "Show Five Landmarks", key="show_five_landmarks", default_value=True, active_color=rgb(255, 75, 75),
-                    track_color=rgb(50, 50, 50)
-                )
-            switch3, switch4 = st.columns(2)
-            with switch3:
-                self.on_mesh = tog.st_toggle_switch(
-                    "Show Mesh", key="show_mesh", default_value=True, active_color=rgb(255, 75, 75),
-                    track_color=rgb(50, 50, 50)
-                )
-            with switch4:
-                self.on_text = tog.st_toggle_switch(
-                    "Show Text", key="show_text", default_value=True, active_color=rgb(255, 75, 75),
-                    track_color=rgb(50, 50, 50)
-                )
-            st.markdown("---")
-
-            st.markdown("## Face Recognition")
-            self.similarity_threshold = st.slider(
-                "Similarity Threshold", min_value=0.0, max_value=2.0, value=0.67
-            )
-
-            self.on_show_faces = tog.st_toggle_switch(
-                "Show Recognized Faces", key="show_recognized_faces", default_value=True, active_color=rgb(255, 75, 75), track_color=rgb(50, 50, 50)
-            )
-
-            self.model_name = st.selectbox(
-                "Model",
-                ["MobileNet", "ResNet"],
-                index=0,
-            )
-            st.markdown("---")
-
-            st.markdown("## Gallery")
-            self.uploaded_files = st.file_uploader(
-                "Choose multiple images to upload", accept_multiple_files=True
-            )
-
-            self.gallery_images, self.gallery_embs, self.gallery_names = gallery_processing(self.uploaded_files)
-
-            st.markdown("**Gallery Faces**")
-            show_images(self.gallery_images, self.gallery_names, 3)
-            st.markdown("---")
-
-
 class KPI:
     """Class for displaying KPIs in a row
     Args:
         keys (list): List of KPI names
     """
+
     def __init__(self, keys):
         self.kpi_texts = []
         row = st.columns(len(keys))
@@ -158,52 +36,26 @@ class KPI:
                 unsafe_allow_html=True,
             )
 
+
 # -----------------------------------------------------------------------------------------------
 # Streamlit App
 st.title("FaceID App Demonstration")
 
-# Sidebar
-sb = SideBar()
-
 # Get Access to Webcam
-webcam = init_webcam(width=sb.resolution[0])
+webcam = init_webcam()
 
 # KPI Section
 st.markdown("**Stats**")
-kpi = KPI([
-    "**FrameRate**",
-    "**Detected Faces**",
-    "**Image Dims**",
-    "**Detection [ms]**",
-    "**Normalization [ms]**",
-    "**Inference [ms]**",
-    "**Recognition [ms]**",
-    "**Annotations [ms]**",
-    "**Show Faces [ms]**",
-])
+kpi = KPI(["**FrameRate**"])
 st.markdown("---")
 
 # Live Stream Display
 stream_display = st.empty()
 st.markdown("---")
 
-# Display Detected Faces
-st.markdown("**Detected Faces**")
-face_window = st.empty()
-st.markdown("---")
-
-
 if webcam:
     prevTime = 0
     while True:
-        # Init times to "-" to show something if face recognition is turned off
-        time_detection = "-"
-        time_alignment = "-"
-        time_inference = "-"
-        time_identification = "-"
-        time_annotations = "-"
-        time_show_faces = "-"
-
         try:
             # Get Frame from Webcam
            frame = webcam.get_frame(timeout=1)
@@ -212,66 +64,6 @@ if webcam:
             frame = frame.to_ndarray(format="rgb24")
         except:
             continue
-
-        # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-        # FACE RECOGNITION PIPELINE
-        if sb.on_face_recognition:
-            # FACE DETECTION ---------------------------------------------------------
-            start_time = time.time()
-            detections = detect_faces(frame, detection_model)
-            time_detection = (time.time() - start_time) * 1000
-
-            # FACE ALIGNMENT ---------------------------------------------------------
-            start_time = time.time()
-            aligned_faces = align_faces(frame, detections)
-            time_alignment = (time.time() - start_time) * 1000
-
-            # INFERENCE --------------------------------------------------------------
-            start_time = time.time()
-            if len(sb.gallery_embs) > 0:
-                faces_embs = inference(aligned_faces, identification_model)
-            else:
-                faces_embs = []
-            time_inference = (time.time() - start_time) * 1000
-
-            # FACE IDENTIFCATION -----------------------------------------------------
-            start_time = time.time()
-            if len(faces_embs) > 0 and len(sb.gallery_embs) > 0:
-                ident_names, ident_dists, ident_imgs = identify(faces_embs, sb.gallery_embs, sb.gallery_names, sb.gallery_images, thresh=sb.similarity_threshold)
-            else:
-                ident_names, ident_dists, ident_imgs = [], [], []
-            time_identification = (time.time() - start_time) * 1000
-
-            # ANNOTATIONS ------------------------------------------------------------
-            start_time = time.time()
-            frame = cv2.resize(frame, (1920, 1080))  # to make annotation in HD
-            frame.flags.writeable = True  # (hack to make annotations faster)
-            if sb.on_mesh:
-                frame = draw_mesh(frame, detections)
-            if sb.on_five_landmarks:
-                frame = draw_landmarks(frame, detections)
-            if sb.on_bounding_box:
-                frame = draw_bounding_box(frame, detections, ident_names)
-            if sb.on_text:
-                frame = draw_text(frame, detections, ident_names)
-            time_annotations = (time.time() - start_time) * 1000
-
-            # DISPLAY DETECTED FACES -------------------------------------------------
-            start_time = time.time()
-            if sb.on_show_faces:
-                show_faces(
-                    aligned_faces,
-                    ident_names,
-                    ident_dists,
-                    ident_imgs,
-                    num_cols=3,
-                    channels="RGB",
-                    display=face_window,
-                )
-            time_show_faces = (time.time() - start_time) * 1000
-        # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-
 
         # DISPLAY THE LIVE STREAM --------------------------------------------------
         stream_display.image(
@@ -284,16 +76,4 @@ if webcam:
         prevTime = currTime
 
         # UPDATE KPIS -------------------------------------------------------------
-        kpi.update_kpi(
-            [
-                fps,
-                len(detections),
-                sb.resolution,
-                time_detection,
-                time_alignment,
-                time_inference,
-                time_identification,
-                time_annotations,
-                time_show_faces,
-            ]
-        )
+        kpi.update_kpi([fps])
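For reference, the app.py left behind by this commit is just the WebRTC capture loop plus a single FrameRate KPI. The full file is reconstructed below from the context and added lines of the hunks above (indentation is inferred, since the diff view strips it); it can be launched with `streamlit run app.py`:

```python
import streamlit as st
import time
from tools.webcam import init_webcam
import logging


# Set logging level to error (To avoid getting spammed by queue warnings etc.)
logging.basicConfig(level=logging.ERROR)


# Set page layout for streamlit to wide
st.set_page_config(layout="wide")


class KPI:
    """Class for displaying KPIs in a row
    Args:
        keys (list): List of KPI names
    """

    def __init__(self, keys):
        self.kpi_texts = []
        row = st.columns(len(keys))
        for kpi, key in zip(row, keys):
            with kpi:
                item_row = st.columns(2)
                item_row[0].markdown(f"**{key}**:")
                self.kpi_texts.append(item_row[1].markdown("-"))

    def update_kpi(self, kpi_values):
        for kpi_text, kpi_value in zip(self.kpi_texts, kpi_values):
            kpi_text.write(
                f"<h5 style='text-align: center; color: red;'>{kpi_value:.2f}</h5>"
                if isinstance(kpi_value, float)
                else f"<h5 style='text-align: center; color: red;'>{kpi_value}</h5>",
                unsafe_allow_html=True,
            )


# -----------------------------------------------------------------------------------------------
# Streamlit App
st.title("FaceID App Demonstration")

# Get Access to Webcam
webcam = init_webcam()

# KPI Section
st.markdown("**Stats**")
kpi = KPI(["**FrameRate**"])
st.markdown("---")

# Live Stream Display
stream_display = st.empty()
st.markdown("---")

if webcam:
    prevTime = 0
    while True:
        try:
            # Get Frame from Webcam
            frame = webcam.get_frame(timeout=1)

            # Convert to OpenCV Image
            frame = frame.to_ndarray(format="rgb24")
        except:
            continue

        # DISPLAY THE LIVE STREAM --------------------------------------------------
        stream_display.image(
            frame, channels="RGB", caption="Live-Stream", use_column_width=True
        )

        # CALCULATE FPS -----------------------------------------------------------
        currTime = time.time()
        fps = 1 / (currTime - prevTime)
        prevTime = currTime

        # UPDATE KPIS -------------------------------------------------------------
        kpi.update_kpi([fps])
```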
app_bak.py ADDED
@@ -0,0 +1,299 @@
+import streamlit as st
+import streamlit_toggle as tog
+import time
+import numpy as np
+import cv2
+from tools.annotation import draw_mesh, draw_landmarks, draw_bounding_box, draw_text
+from tools.alignment import align_faces
+from tools.identification import load_identification_model, inference, identify
+from tools.utils import show_images, show_faces, rgb
+from tools.detection import load_detection_model, detect_faces
+from tools.webcam import init_webcam
+import logging
+
+
+# Set logging level to error (To avoid getting spammed by queue warnings etc.)
+logging.basicConfig(level=logging.ERROR)
+
+
+# Set page layout for streamlit to wide
+st.set_page_config(layout="wide")
+
+
+# Initialize the Face Detection and Identification Models
+detection_model = load_detection_model(max_faces=2, detection_confidence=0.5, tracking_confidence=0.9)
+identification_model = load_identification_model(name="MobileNet")
+
+
+# Gallery Processing
+@st.cache_data
+def gallery_processing(gallery_files):
+    """Process the gallery images (Complete Face Recognition Pipeline)
+
+    Args:
+        gallery_files (_type_): Files uploaded by the user
+
+    Returns:
+        _type_: Gallery Images, Gallery Embeddings, Gallery Names
+    """
+    gallery_images, gallery_embs, gallery_names = [], [], []
+    if gallery_files is not None:
+        for file in gallery_files:
+            file_bytes = np.asarray(bytearray(file.read()), dtype=np.uint8)
+            img = cv2.cvtColor(
+                cv2.imdecode(file_bytes, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB
+            )
+            gallery_names.append(
+                file.name.split(".jpg")[0].split(".png")[0].split(".jpeg")[0]
+            )
+            detections = detect_faces(img, detection_model)
+            aligned_faces = align_faces(img, np.asarray([detections[0]]))
+            gallery_images.append(aligned_faces[0])
+            gallery_embs.append(inference(aligned_faces, identification_model)[0])
+    return gallery_images, gallery_embs, gallery_names
+
+
+class SideBar:
+    """A class to handle the sidebar
+    """
+    def __init__(self):
+        with st.sidebar:
+            st.markdown("# Preferences")
+            self.on_face_recognition = tog.st_toggle_switch(
+                "Face Recognition", key="activate_face_rec", default_value=True, active_color=rgb(255, 75, 75), track_color=rgb(50, 50, 50)
+            )
+
+            st.markdown("---")
+
+            st.markdown("## Webcam")
+            self.resolution = st.selectbox(
+                "Webcam Resolution",
+                [(1920, 1080), (1280, 720), (640, 360)],
+                index=2,
+            )
+            st.markdown("To change webcam resolution: Please refresh page and select resolution before starting webcam stream.")
+
+            st.markdown("---")
+            st.markdown("## Face Detection")
+            self.max_faces = st.number_input(
+                "Maximum Number of Faces", value=2, min_value=1
+            )
+            self.detection_confidence = st.slider(
+                "Min Detection Confidence", min_value=0.0, max_value=1.0, value=0.5
+            )
+            self.tracking_confidence = st.slider(
+                "Min Tracking Confidence", min_value=0.0, max_value=1.0, value=0.9
+            )
+            switch1, switch2 = st.columns(2)
+            with switch1:
+                self.on_bounding_box = tog.st_toggle_switch(
+                    "Show Bounding Box", key="show_bounding_box", default_value=True, active_color=rgb(255, 75, 75), track_color=rgb(50, 50, 50)
+                )
+            with switch2:
+                self.on_five_landmarks = tog.st_toggle_switch(
+                    "Show Five Landmarks", key="show_five_landmarks", default_value=True, active_color=rgb(255, 75, 75),
+                    track_color=rgb(50, 50, 50)
+                )
+            switch3, switch4 = st.columns(2)
+            with switch3:
+                self.on_mesh = tog.st_toggle_switch(
+                    "Show Mesh", key="show_mesh", default_value=True, active_color=rgb(255, 75, 75),
+                    track_color=rgb(50, 50, 50)
+                )
+            with switch4:
+                self.on_text = tog.st_toggle_switch(
+                    "Show Text", key="show_text", default_value=True, active_color=rgb(255, 75, 75),
+                    track_color=rgb(50, 50, 50)
+                )
+            st.markdown("---")
+
+            st.markdown("## Face Recognition")
+            self.similarity_threshold = st.slider(
+                "Similarity Threshold", min_value=0.0, max_value=2.0, value=0.67
+            )
+
+            self.on_show_faces = tog.st_toggle_switch(
+                "Show Recognized Faces", key="show_recognized_faces", default_value=True, active_color=rgb(255, 75, 75), track_color=rgb(50, 50, 50)
+            )
+
+            self.model_name = st.selectbox(
+                "Model",
+                ["MobileNet", "ResNet"],
+                index=0,
+            )
+            st.markdown("---")
+
+            st.markdown("## Gallery")
+            self.uploaded_files = st.file_uploader(
+                "Choose multiple images to upload", accept_multiple_files=True
+            )
+
+            self.gallery_images, self.gallery_embs, self.gallery_names = gallery_processing(self.uploaded_files)
+
+            st.markdown("**Gallery Faces**")
+            show_images(self.gallery_images, self.gallery_names, 3)
+            st.markdown("---")
+
+
+class KPI:
+    """Class for displaying KPIs in a row
+    Args:
+        keys (list): List of KPI names
+    """
+    def __init__(self, keys):
+        self.kpi_texts = []
+        row = st.columns(len(keys))
+        for kpi, key in zip(row, keys):
+            with kpi:
+                item_row = st.columns(2)
+                item_row[0].markdown(f"**{key}**:")
+                self.kpi_texts.append(item_row[1].markdown("-"))
+
+    def update_kpi(self, kpi_values):
+        for kpi_text, kpi_value in zip(self.kpi_texts, kpi_values):
+            kpi_text.write(
+                f"<h5 style='text-align: center; color: red;'>{kpi_value:.2f}</h5>"
+                if isinstance(kpi_value, float)
+                else f"<h5 style='text-align: center; color: red;'>{kpi_value}</h5>",
+                unsafe_allow_html=True,
+            )
+
+# -----------------------------------------------------------------------------------------------
+# Streamlit App
+st.title("FaceID App Demonstration")
+
+# Sidebar
+sb = SideBar()
+
+# Get Access to Webcam
+webcam = init_webcam(width=sb.resolution[0])
+
+# KPI Section
+st.markdown("**Stats**")
+kpi = KPI([
+    "**FrameRate**",
+    "**Detected Faces**",
+    "**Image Dims**",
+    "**Detection [ms]**",
+    "**Normalization [ms]**",
+    "**Inference [ms]**",
+    "**Recognition [ms]**",
+    "**Annotations [ms]**",
+    "**Show Faces [ms]**",
+])
+st.markdown("---")
+
+# Live Stream Display
+stream_display = st.empty()
+st.markdown("---")
+
+# Display Detected Faces
+st.markdown("**Detected Faces**")
+face_window = st.empty()
+st.markdown("---")
+
+
+if webcam:
+    prevTime = 0
+    while True:
+        # Init times to "-" to show something if face recognition is turned off
+        time_detection = "-"
+        time_alignment = "-"
+        time_inference = "-"
+        time_identification = "-"
+        time_annotations = "-"
+        time_show_faces = "-"
+
+        try:
+            # Get Frame from Webcam
+            frame = webcam.get_frame(timeout=1)
+
+            # Convert to OpenCV Image
+            frame = frame.to_ndarray(format="rgb24")
+        except:
+            continue
+
+        # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+        # FACE RECOGNITION PIPELINE
+        if sb.on_face_recognition:
+            # FACE DETECTION ---------------------------------------------------------
+            start_time = time.time()
+            detections = detect_faces(frame, detection_model)
+            time_detection = (time.time() - start_time) * 1000
+
+            # FACE ALIGNMENT ---------------------------------------------------------
+            start_time = time.time()
+            aligned_faces = align_faces(frame, detections)
+            time_alignment = (time.time() - start_time) * 1000
+
+            # INFERENCE --------------------------------------------------------------
+            start_time = time.time()
+            if len(sb.gallery_embs) > 0:
+                faces_embs = inference(aligned_faces, identification_model)
+            else:
+                faces_embs = []
+            time_inference = (time.time() - start_time) * 1000
+
+            # FACE IDENTIFCATION -----------------------------------------------------
+            start_time = time.time()
+            if len(faces_embs) > 0 and len(sb.gallery_embs) > 0:
+                ident_names, ident_dists, ident_imgs = identify(faces_embs, sb.gallery_embs, sb.gallery_names, sb.gallery_images, thresh=sb.similarity_threshold)
+            else:
+                ident_names, ident_dists, ident_imgs = [], [], []
+            time_identification = (time.time() - start_time) * 1000
+
+            # ANNOTATIONS ------------------------------------------------------------
+            start_time = time.time()
+            frame = cv2.resize(frame, (1920, 1080))  # to make annotation in HD
+            frame.flags.writeable = True  # (hack to make annotations faster)
+            if sb.on_mesh:
+                frame = draw_mesh(frame, detections)
+            if sb.on_five_landmarks:
+                frame = draw_landmarks(frame, detections)
+            if sb.on_bounding_box:
+                frame = draw_bounding_box(frame, detections, ident_names)
+            if sb.on_text:
+                frame = draw_text(frame, detections, ident_names)
+            time_annotations = (time.time() - start_time) * 1000
+
+            # DISPLAY DETECTED FACES -------------------------------------------------
+            start_time = time.time()
+            if sb.on_show_faces:
+                show_faces(
+                    aligned_faces,
+                    ident_names,
+                    ident_dists,
+                    ident_imgs,
+                    num_cols=3,
+                    channels="RGB",
+                    display=face_window,
+                )
+            time_show_faces = (time.time() - start_time) * 1000
+        # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+
+
+        # DISPLAY THE LIVE STREAM --------------------------------------------------
+        stream_display.image(
+            frame, channels="RGB", caption="Live-Stream", use_column_width=True
+        )
+
+        # CALCULATE FPS -----------------------------------------------------------
+        currTime = time.time()
+        fps = 1 / (currTime - prevTime)
+        prevTime = currTime
+
+        # UPDATE KPIS -------------------------------------------------------------
+        kpi.update_kpi(
+            [
+                fps,
+                len(detections),
+                sb.resolution,
+                time_detection,
+                time_alignment,
+                time_inference,
+                time_identification,
+                time_annotations,
+                time_show_faces,
+            ]
+        )
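app_bak.py preserves the full pipeline verbatim, including the identify() step that matches webcam embeddings against the gallery. tools/identification.py itself is not part of this commit, so the sketch below is an illustration only: a nearest-neighbor matcher under a distance threshold, where the cosine-distance metric, the epsilon guard, and the "Unknown" fallback are assumptions rather than the repo's actual implementation (the sidebar's threshold slider capped at 2.0 is at least consistent with a cosine-style distance):

```python
import numpy as np


def identify_sketch(face_embs, gallery_embs, gallery_names, gallery_images, thresh=0.67):
    """Hypothetical stand-in for tools.identification.identify."""
    names, dists, imgs = [], [], []
    gallery = np.asarray(gallery_embs, dtype=np.float32)
    gallery_norms = np.linalg.norm(gallery, axis=1)
    for emb in np.asarray(face_embs, dtype=np.float32):
        # Cosine distance in [0, 2], matching the slider's max_value=2.0.
        d = 1.0 - gallery @ emb / (gallery_norms * np.linalg.norm(emb) + 1e-9)
        best = int(np.argmin(d))
        if d[best] < thresh:
            names.append(gallery_names[best])
            imgs.append(gallery_images[best])
        else:
            names.append("Unknown")  # assumed open-set fallback
            imgs.append(None)
        dists.append(float(d[best]))
    return names, dists, imgs
```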
tools/webcam.py CHANGED
@@ -1,12 +1,9 @@
 import streamlit as st
-from streamlit_webrtc import webrtc_streamer, WebRtcMode, RTCConfiguration
-## This sample code is from https://www.twilio.com/docs/stun-turn/api
-# Download the helper library from https://www.twilio.com/docs/python/install
+from streamlit_webrtc import webrtc_streamer, WebRtcMode
 import os
 from twilio.rest import Client
 
-# Find your Account SID and Auth Token at twilio.com/console
-# and set the environment variables. See http://twil.io/secure
+
 account_sid = os.environ['TWILIO_ACCOUNT_SID']
 auth_token = os.environ['TWILIO_AUTH_TOKEN']
 client = Client(account_sid, auth_token)
@@ -18,34 +15,6 @@ RTC_CONFIGURATION={
     "iceServers": token.ice_servers
 }
 
-# RTC_CONFIGURATION = RTCConfiguration({
-#     "iceServers": [
-#         {
-#             "urls": "stun:a.relay.metered.ca:80",
-#         },
-#         {
-#             "urls": "turn:a.relay.metered.ca:80",
-#             "username": "5b3af333bdecb76c15167cf2",
-#             "credential": "bGnptPEBRNPnMKLP",
-#         },
-#         {
-#             "urls": "turn:a.relay.metered.ca:80?transport=tcp",
-#             "username": "5b3af333bdecb76c15167cf2",
-#             "credential": "bGnptPEBRNPnMKLP",
-#         },
-#         {
-#             "urls": "turn:a.relay.metered.ca:443",
-#             "username": "5b3af333bdecb76c15167cf2",
-#             "credential": "bGnptPEBRNPnMKLP",
-#         },
-#         {
-#             "urls": "turn:a.relay.metered.ca:443?transport=tcp",
-#             "username": "5b3af333bdecb76c15167cf2",
-#             "credential": "bGnptPEBRNPnMKLP",
-#         },
-#     ],
-# })
-
 
 @st.cache_resource(experimental_allow_widgets=True)
 def init_webcam(width=680):
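The hunk ends at the init_webcam signature, so its body is not visible in this diff. Below is a minimal sketch of how such a helper is commonly built with streamlit-webrtc, consistent with the imports above and with app.py calling webcam.get_frame(timeout=1) followed by to_ndarray(format="rgb24"). The widget key, SENDONLY mode, and media constraints are assumptions, not taken from the repo; RTC_CONFIGURATION refers to the Twilio ICE-server dict defined earlier in the module:

```python
import streamlit as st
from streamlit_webrtc import WebRtcMode, webrtc_streamer


@st.cache_resource(experimental_allow_widgets=True)
def init_webcam(width=680):
    # Assumed body: open a send-only stream (the browser sends video, the
    # server receives it) and return the video receiver, from which the app
    # pulls av.VideoFrame objects via get_frame(timeout=...).
    ctx = webrtc_streamer(
        key="webcam",  # assumed widget key
        mode=WebRtcMode.SENDONLY,
        rtc_configuration=RTC_CONFIGURATION,  # Twilio ICE servers from above
        media_stream_constraints={
            "video": {"width": {"ideal": width}},
            "audio": False,
        },
    )
    return ctx.video_receiver
```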