Delete app.py
Browse files
app.py
DELETED
@@ -1,67 +0,0 @@
|
|
1 |
-
import streamlit as st
|
2 |
-
import cv2
|
3 |
-
import numpy as np
|
4 |
-
import tensorflow as tf
|
5 |
-
import random
|
6 |
-
import time
|
7 |
-
|
8 |
-
# Streamlit text box where the user pastes the streaming (e.g. HLS) URL.
url = st.text_area('enter streaming url')
# Location names attached at random to each prediction row
# (presumably placeholder metadata — no real geolocation is performed).
locations = ['Miami', 'Smouha', 'Mandara', 'Sporting', 'Montazah']
# TensorFlow SavedModel loaded from the local 'my_model' directory at import time.
model = tf.saved_model.load('my_model')

# Class labels, indexed to match the model's classifier output.
classes_1 = ["RoadAccidents", "Fighting", "NormalVideos"]  # Define your classes here
|
13 |
-
|
14 |
-
# Function to preprocess frames for action recognition
|
15 |
-
def preprocess_frame(frame):
    """Turn one BGR video frame into a normalized model input.

    Converts BGR -> RGB, resizes to 224x224, scales pixel values into
    [0, 1] as float32, and prepends a leading batch axis.

    Args:
        frame: BGR image array as produced by ``cv2.VideoCapture.read``.

    Returns:
        float32 array of shape (1, 224, 224, 3).
    """
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    resized = cv2.resize(rgb, (224, 224))
    normalized = resized.astype('float32') / 255.0
    return np.expand_dims(normalized, axis=0)
|
21 |
-
|
22 |
-
def get_top_k(probs, k=1, label_map=classes_1):
    """Return the most likely class label for one probability vector.

    Despite the name, only the single best label is returned; ``k``
    controls how many candidates are gathered before the best one is
    picked.

    Args:
        probs: 1-D tensor of class probabilities.
        k: Number of top candidates to gather (only index 0 is returned).
        label_map: Class names indexed like ``probs``.

    Returns:
        str: the highest-probability class name.
    """
    # Indices of the k highest-probability classes, best first.
    top_predictions = tf.argsort(probs, axis=-1, direction='DESCENDING')[:k]
    top_labels = tf.gather(label_map, top_predictions, axis=-1)
    # tf.gather over a Python string list yields bytes; decode to str.
    top_labels = [label.decode('utf8') for label in top_labels.numpy()]
    # NOTE: the original also gathered top-k probabilities into an unused
    # local; that dead computation has been removed.
    return top_labels[0]
|
29 |
-
|
30 |
-
# Function to perform action recognition on the HLS stream
|
31 |
-
def perform_action_recognition(url, model, k=1, label_map=classes_1, locations=locations):
    """Yield per-frame action predictions for a video stream.

    Opens ``url`` with OpenCV and, for every readable frame, yields a
    4-element list: [predicted label, elapsed "HH:MM:SS", a random
    location from ``locations``, the last path component of ``url``].

    Args:
        url: Video/stream URL accepted by ``cv2.VideoCapture``.
        model: Loaded TF SavedModel with a 'serving_default' signature.
        k: Passed through to ``get_top_k``.
        label_map: Class names for ``get_top_k``.
        locations: Pool of location names to sample from.

    Yields:
        list: [label (str), elapsed time (str), location (str), stream id (str)].
    """
    cap = cv2.VideoCapture(url)
    start_time = time.time()  # Wall-clock start, for the elapsed-time column.
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                # Stream ended or read failed.
                break

            preprocessed_frame = preprocess_frame(frame)

            # NOTE(review): preprocess_frame already added a batch axis, and
            # tf.newaxis adds another, so the model receives a rank-5 input —
            # presumably (batch, time, H, W, C). Confirm against the
            # SavedModel's serving signature.
            outputs = model.signatures['serving_default'](image=preprocessed_frame[tf.newaxis])
            probs = tf.nn.softmax(outputs['classifier_head_1'])

            elapsed = time.time() - start_time
            m, s = divmod(elapsed, 60)
            h, m = divmod(m, 60)
            # Last URL path component doubles as a stream identifier.
            ip_address = url.split("/")[-1]

            yield [
                get_top_k(probs[0], k=k, label_map=label_map),
                f"{int(h):02d}:{int(m):02d}:{int(s):02d}",
                random.choice(locations),
                ip_address,
            ]
    finally:
        # Release the capture even if the consumer abandons the generator
        # early (the original only released after a clean loop exit).
        cap.release()
|
58 |
-
|
59 |
-
# Call the function to perform action recognition on the HLS stream.

# Placeholder that is overwritten in-place with each new prediction,
# so the page shows only the latest result instead of appending.
output_placeholder = st.empty()

# Drive the recognition generator and render each prediction as JSON.
# Runs only once a URL has been entered in the text area above.
if url:
    for output in perform_action_recognition(url, model):
        output_placeholder.json(output)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|