adewopova committed • d1a7e9f
Parent(s): 96c9fbd
Update accident_app.py

accident_app.py • ADDED (+421 -0)
@@ -0,0 +1,421 @@
import os
import cv2
import numpy as np
import tensorflow as tf
import random
from tensorflow.keras.models import load_model
import subprocess
import shutil
from multiprocessing import Pool
import tempfile
import streamlit as st
import base64

# [All the functions from the first code option]
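# Editorial overview (derived from the functions below): the app bundles two
# inference paths. The "first code option" handles untrimmed videos: every frame
# is loaded, split into non-overlapping 30-frame chunks, paired with Farneback
# optical flow, and the per-chunk predictions are burned back onto the frames as
# an annotated output video. The "second set of code" further down handles
# trimmed clips: every 5th frame is sampled into a single 30-frame window and
# one prediction is reported through the Streamlit UI.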
def load_and_preprocess_video(video_path):
    cap = cv2.VideoCapture(video_path)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    frames = []
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame = crop_center_square(frame)
        frame = cv2.resize(frame, (224, 224))
        frame = frame[:, :, [2, 1, 0]]
        frames.append(frame)
    cap.release()
    # print("Total Frames:", total_frames)
    # print("Frames per Second:", fps)
    # print("frame processed:", len(frames))
    return np.array(frames)
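# load_and_preprocess_video returns an array of shape (num_frames, 224, 224, 3);
# the [2, 1, 0] channel swap converts OpenCV's default BGR ordering to the RGB
# ordering the model expects.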
def calculate_optical_flow(frames):
    gray_frames = [cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) for frame in frames]
    optical_flow_frames = []
    for i in range(len(gray_frames) - 1):
        flow = cv2.calcOpticalFlowFarneback(gray_frames[i], gray_frames[i + 1], None, 0.5, 3, 15, 3, 5, 1.2, 0)
        optical_flow_frames.append(flow)
    optical_flow_frames.append(optical_flow_frames[-1])
    return np.array(optical_flow_frames)
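# Each Farneback flow field has shape (224, 224, 2): a per-pixel (dx, dy)
# displacement between consecutive grayscale frames. N frames yield N - 1 flow
# fields, so the last field is duplicated above to keep the flow stack the same
# length as the frame stack.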
def parallel_optical_flow(chunks):
    with Pool(processes=os.cpu_count()) as pool:
        optical_flows = pool.map(calculate_optical_flow, chunks)
    return optical_flows
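# parallel_optical_flow fans the chunks out across one worker per CPU core. A
# hedged caution, not part of the original code: on platforms where
# multiprocessing uses the "spawn" start method (Windows, macOS by default),
# each worker re-imports this module, so module-level work such as the
# load_model call below runs once per worker. A serial fallback, e.g.
#     optical_flows = [calculate_optical_flow(c) for c in chunks]
# is a possible alternative if worker startup cost dominates.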
def crop_center_square(frame):
    y, x = frame.shape[0:2]
    min_dim = min(y, x)
    start_x = x // 2 - min_dim // 2
    start_y = y // 2 - min_dim // 2
    return frame[start_y:start_y + min_dim, start_x:start_x + min_dim]

def load_and_preprocess_video_every_5th_frame(video_path):
    cap = cv2.VideoCapture(video_path)
    frames = []
    frame_count = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        if frame_count % 5 == 0:
            frame = crop_center_square(frame)
            frame = cv2.resize(frame, (224, 224))
            frame = frame[:, :, [2, 1, 0]]
            frames.append(frame)
        frame_count += 1
    cap.release()
    return np.array(frames)
def pad_chunk(chunk, window_size=30):
    while chunk.shape[0] < window_size:
        chunk = np.vstack((chunk, [chunk[-1]]))  # appending the last frame to the chunk
    return chunk
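# pad_chunk repeats the final frame until the chunk reaches window_size, so a
# trailing partial chunk still forms a full 30-frame model input.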
def create_chunks_from_frames(frames, window_size=30):
    # Create non-overlapping chunks of window_size from frames
    chunks = [frames[i:i + window_size] for i in range(0, len(frames), window_size)]
    if len(chunks[-1]) < window_size:
        chunks[-1] = pad_chunk(chunks[-1])
    return chunks
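# Worked example (illustrative, not from the original file): a 75-frame video
# yields chunks of 30, 30, and 15 frames; pad_chunk then extends the last chunk
# to 30 by repeating its final frame, giving three model-ready windows in total.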
def overlay_predictions_to_video(frames, predictions):
    temp_dir = 'temp_frames'

    # Clear existing frames and video if they exist
    if os.path.exists(temp_dir):
        shutil.rmtree(temp_dir)
    os.makedirs(temp_dir)

    video_output_path = 'p_user_upload.mp4'
    if os.path.exists(video_output_path):
        os.remove(video_output_path)

    frame_idx = 0

    # Desired resolution for the video
    desired_resolution = (1280, 720)  # HD resolution

    for prediction in predictions:
        # Overlay the prediction for WINDOW_SIZE frames
        if frame_idx >= len(frames):  # Make sure not to exceed total frames
            break
        frame = frames[frame_idx]

        # Resize the frame to the desired resolution
        frame = cv2.resize(frame, desired_resolution, interpolation=cv2.INTER_AREA)

        frame_idx += 1
        color = (0, 255, 0) if prediction[1] > prediction[0] else (255, 0, 0)
        frame = cv2.putText(frame, f"Accident: {prediction[0]:.2f}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2, cv2.LINE_AA)
        frame = cv2.putText(frame, f"No Accident: {prediction[1]:.2f}", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2, cv2.LINE_AA)
        frame = frame[:, :, [2, 1, 0]]

        # Save frame to disk
        cv2.imwrite(os.path.join(temp_dir, f'frame_{frame_idx:04d}.png'), frame, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])  # Highest quality

    # Use ffmpeg to stitch frames into video with higher bitrate for better quality
    cmd = f"ffmpeg -framerate 20 -i {temp_dir}/frame_%04d.png -c:v libx264 -b:v 1500k -pix_fmt yuv420p {video_output_path}"
    subprocess.call(cmd, shell=True)
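# A minimal sketch of an alternative to the shell-string ffmpeg call above,
# assuming the same frame naming and output path conventions as
# overlay_predictions_to_video. Passing an argument list avoids shell=True and
# any quoting issues with the paths; -y overwrites a stale output file. This
# helper is illustrative only and is not wired into the app.
def stitch_frames_with_ffmpeg_sketch(frame_dir='temp_frames',
                                     output_path='p_user_upload.mp4',
                                     framerate=20):
    cmd = [
        'ffmpeg', '-y',
        '-framerate', str(framerate),
        '-i', os.path.join(frame_dir, 'frame_%04d.png'),
        '-c:v', 'libx264',
        '-b:v', '1500k',
        '-pix_fmt', 'yuv420p',
        output_path,
    ]
    subprocess.call(cmd)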
def process_video(video_path, model):
    # Load all frames
    frames = load_and_preprocess_video(video_path)

    # Create chunks of size 30 from frames
    chunks = create_chunks_from_frames(frames)

    # Calculate optical flow for all chunks
    optical_flows = parallel_optical_flow(chunks)
    optical_flows = [flow / np.max(np.abs(flow), axis=(1, 2), keepdims=True) for flow in optical_flows]

    # Normalize frames
    chunks = [chunk / 255.0 for chunk in chunks]

    # Batch predictions
    all_predictions = []
    for i in range(len(chunks)):
        batched_frames = np.array([chunks[i]])
        batched_flows = np.array([optical_flows[i]])
        prediction = model.predict([batched_frames, batched_flows])
        # print(prediction)
        all_predictions.extend([prediction[0]] * WINDOW_SIZE)

    # Overlay predictions to the video and save
    overlay_predictions_to_video(frames, all_predictions)
    # return all_predictions

# ___________________________________________________________________________________________________
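# Shape check for the untrimmed path (derived from the functions above): each
# model call receives frames of shape (1, 30, 224, 224, 3) and flow of shape
# (1, 30, 224, 224, 2) and returns a (1, 2) prediction pair; that pair is then
# repeated WINDOW_SIZE times so every frame in the chunk carries its chunk's
# prediction when overlaid.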
# [All the functions from the second set of code]
def second_calculate_optical_flow(frames):
    gray_frames = [cv2.cvtColor(tf.cast(frame, tf.uint8).numpy(), cv2.COLOR_RGB2GRAY) for frame in frames]
    optical_flow_frames = []
    for i in range(len(gray_frames) - 1):
        flow = cv2.calcOpticalFlowFarneback(gray_frames[i], gray_frames[i + 1], None, 0.5, 3, 15, 3, 5, 1.2, 0)
        optical_flow_frames.append(flow)

    # Repeat the last optical flow frame
    optical_flow_frames.append(optical_flow_frames[-1])
    optical_flow_frames = np.array(optical_flow_frames)
    return optical_flow_frames
def singledatacombined_load_and_preprocess_video(video_path, max_frames=30):
    cap = cv2.VideoCapture(video_path)
    frames = np.zeros(shape=(max_frames, 224, 224, 3))
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    i = 0
    frame_count = 0
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            if frame_count % 5 == 0:
                frame = crop_center_square(frame)
                frame = cv2.resize(frame, (224, 224))
                frame = frame[:, :, [2, 1, 0]]
                frames[i] = frame
                i += 1
                if i == max_frames:
                    break
            frame_count += 1
    finally:
        cap.release()
    st.write("Total Frames:", total_frames)
    # print("Frames per Second:", fps)
    st.write("Frames processed:", i)

    return tf.constant(frames, dtype=tf.float32)
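# Sampling every 5th frame into a 30-slot buffer covers roughly 150 source
# frames, i.e. about 5 seconds at 30 fps, which matches the trimmed 5-second
# windows the model was trained on. Clips shorter than that leave the unfilled
# slots as the zeros they were initialised with.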
def singlegenerator(video_path, max_frames=30, augment_data=False):
    frames = singledatacombined_load_and_preprocess_video(video_path)
    optical = second_calculate_optical_flow(frames)
    optical_flow = tf.convert_to_tensor(optical)

    optical_flow = optical_flow / tf.reduce_max(tf.abs(optical_flow))  # Normalize optical flow
    if augment_data:
        # Apply data augmentation to frames
        augmented_frames = []
        random_num = random.random()
        for frame in frames:
            if random_num < 0.25:
                augmented_frame = tf.image.random_flip_left_right(frame)
            elif random_num < 0.5:
                augmented_frame = tf.image.random_flip_up_down(frame)
            elif random_num < 0.75:
                num_rotations = random.randint(0, 3)
                augmented_frame = tf.image.rot90(frame, k=num_rotations)
            else:
                augmented_frame = frame
            augmented_frames.append(augmented_frame)
        frames = tf.stack(augmented_frames)
    frames = frames / 255.0
    return (frames, optical_flow)
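# Note on the augmentation branch above: random_num is drawn once per video, so
# a single augmentation type applies to the whole clip, but the
# tf.image.random_flip_* ops draw a fresh coin flip per frame, so individual
# frames of the same clip can be flipped inconsistently. With augment_data=False
# (as used everywhere in this app) the branch is skipped and only the /255.0
# scaling runs.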
def single_video_predict_on_frames(vid_dir):
    st.write("===================================")
    frames, optical_flow = singlegenerator(vid_dir, augment_data=False)
    # Model prediction
    prediction = loaded_model([frames[tf.newaxis, ...], optical_flow[tf.newaxis, ...]])
    labels_map = ["Accident", "No Accident"]
    video_name = str(vid_dir)
    # Extracting max prediction and its index
    max_index = tf.argmax(prediction[0]).numpy()
    max_value = prediction[0][max_index].numpy()

    st.write("Name: Uploaded Video")
    st.write(f"Action Detected: {labels_map[max_index]} ({max_value * 100:.2f}%)")
    st.write(f"{labels_map[0]} Probability: {prediction[0][0] * 100:.2f}%")
    st.write(f"{labels_map[1]} Probability: {prediction[0][1] * 100:.2f}%")
    st.write("===================================")
    return prediction
# Global Constants
WINDOW_SIZE = 30
SAMPLE_VIDEOS_UNTRIMMED = ["Video4.mp4", "Video5.mp4", "Video6.mp4", "Video7.mp4", "Video8.mp4", "Video9.mp4", "Video10.mp4"]
SAMPLE_VIDEOS_TRIMMED = ["Video1.mp4", "Video2.mp4", "Video3.mp4"]

# Ensure the model is loaded globally
loaded_model = load_model('Updated_80_percent_new_model.h5')
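# Streamlit re-executes this script on every widget interaction, so the
# load_model call above runs on each rerun. A hedged sketch of a cached loader
# (assuming a Streamlit version that provides st.cache_resource; not wired in):
#
#     @st.cache_resource
#     def get_model(path='Updated_80_percent_new_model.h5'):
#         return load_model(path)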
def display_selected_sample_video(videos_list):
    selected_video = st.selectbox("Select a sample video to play:", videos_list)
    if os.path.exists(selected_video):
        st.video(selected_video)
    return selected_video
def get_image_base64(path):
    with open(path, "rb") as img_file:
        return base64.b64encode(img_file.read()).decode('utf-8')
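# get_image_base64 converts an image file to a base64 string, typically for
# embedding in HTML/markdown as a data URI, e.g. (illustrative only, assuming a
# local logo.png):
#     st.markdown(f'<img src="data:image/png;base64,{get_image_base64("logo.png")}">',
#                 unsafe_allow_html=True)
# It is defined but not called anywhere else in this file.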
def main():
    # Page Settings
    st.set_page_config(
        page_title="Accident Detection Model",
        layout="wide",
        initial_sidebar_state="expanded",
    )

    st.markdown(
        """
        <style>
        /* Styles for entire page and main container */
        body {
            background-color: #e6e6e6;
        }
        .stApp {
            background-color: #e6e6e6;
        }

        /* Styles for the header container */
        .header-container {
            background-color: #3a8d8b; /* Adjust the color if needed */
            padding: 20px 40px; /* Adjusted padding to push it away from the edges a bit */
            border-radius: 0; /* Remove the rounded corners */
            margin: -10px -40px 10px -40px; /* Stretching the header to the full width */
        }

        /* Styling the text color inside the header */
        .header-container h1, .header-container h2 {
            color: white;
        }
        </style>
        <div class="header-container">
            <h1>Accident Detection Model</h1>
            <h2>Dissertation on Accident Detection for Smart City Transportation</h2>
        </div>
        """,
        unsafe_allow_html=True
    )
    st.markdown(
        """Developers: Victor Adewopo, Nelly Elsayed |
        [Research Paper](https://arxiv.org/pdf/2310.10038.pdf)""",
        unsafe_allow_html=True
    )

    st.warning("The models are still in development and were originally trained to detect trimmed, non-overlapping 5-second actions.")

    video_option = st.radio("", ["Untrimmed (Accident Detection)", "Trimmed (5 Seconds window)"])
    st.markdown("<div class='big-heading'>Upload your own video or use any of the sample videos below:</div>", unsafe_allow_html=True)

    # This makes the upload button appear at the top
    uploaded_file = st.file_uploader("", type=['mp4', 'mov', 'avi', 'mkv'])
    if video_option == "Untrimmed (Accident Detection)":
        st.markdown("## Sample Videos:")
        col1, col2 = st.columns(2)
        with col1:
            display_selected_sample_video(SAMPLE_VIDEOS_UNTRIMMED)

        with col2:
            if uploaded_file:
                tfile = tempfile.NamedTemporaryFile(delete=False)
                tfile.write(uploaded_file.read())
                tfile.flush()  # ensure buffered bytes reach disk before reading by name

                progress_bar = st.progress(0)
                st.write('Processing video...')
                process_video(tfile.name, loaded_model)

                progress_bar.progress(50)

                if os.path.exists('p_user_upload.mp4'):
                    st.write('Video processed. Displaying results...')
                    st.video('p_user_upload.mp4')
                    progress_bar.progress(100)
                else:
                    st.write("Error: Video processing failed.")

                os.remove(tfile.name)
    elif video_option == "Trimmed (5 Seconds window)":
        st.markdown("## Sample Videos:")
        col1, col2 = st.columns(2)
        with col1:
            selected_video_file = display_selected_sample_video(SAMPLE_VIDEOS_TRIMMED)
        with col2:
            progress_bar = st.progress(0)
            st.write('Processing video...')
            single_video_predict_on_frames(selected_video_file)
            progress_bar.progress(100)

        if uploaded_file:
            tfile = tempfile.NamedTemporaryFile(delete=False)
            tfile.write(uploaded_file.read())
            tfile.flush()  # ensure buffered bytes reach disk before reading by name

            st.write('Displaying uploaded video...')
            col1, col2 = st.columns(2)  # Splitting the layout
            col1.video(tfile.name)
            with col2:
                progress_bar = st.progress(0)
                st.write('Processing video...')
                single_video_predict_on_frames(tfile.name)
                progress_bar.progress(100)

            os.remove(tfile.name)


if __name__ == "__main__":
    main()