"""Streamlit app: live action recognition on an HLS video stream.

Reads a stream URL from the user, classifies every decoded frame with a
TensorFlow SavedModel, and continuously displays prediction records
(label, elapsed time, location, stream id) as JSON.
"""

import random
import time

import cv2
import numpy as np
import streamlit as st
import tensorflow as tf

# --- UI and global configuration -----------------------------------------
url = st.text_area('enter streaming url')

# Candidate display locations; one is chosen at random for each prediction.
locations = ['Miami', 'Smouha', 'Mandara', 'Sporting', 'Montazah']

# SavedModel exported with a 'serving_default' signature whose output dict
# contains a 'classifier_head_1' logits tensor (see perform_action_recognition).
model = tf.saved_model.load('my_model')

# Class labels, index-aligned with the classifier head's outputs.
classes_1 = ["Arson", "Assault", "Burglary", "Normal"]  # Define your classes here


def preprocess_frame(frame):
    """Convert a BGR OpenCV frame to a (1, 224, 224, 3) float32 RGB batch in [0, 1].

    Args:
        frame: raw BGR image as returned by ``cv2.VideoCapture.read``.

    Returns:
        numpy.ndarray of shape (1, 224, 224, 3), dtype float32, scaled to [0, 1].
    """
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame = cv2.resize(frame, (224, 224))
    frame = frame.astype('float32') / 255.0
    return np.expand_dims(frame, axis=0)


def get_top_k(probs, k=1, label_map=classes_1):
    """Return the single most probable label in ``probs``.

    ``k`` controls how many candidates are gathered internally, but only the
    top label is returned (parameter kept for interface compatibility).
    """
    top_predictions = tf.argsort(probs, axis=-1, direction='DESCENDING')[:k]
    top_labels = tf.gather(label_map, top_predictions, axis=-1)
    # tf.gather over a Python list of str yields a bytes tensor; decode back.
    top_labels = [label.decode('utf8') for label in top_labels.numpy()]
    return top_labels[0]


def perform_action_recognition(url, model, k=1, label_map=classes_1, locations=locations):
    """Yield ``[label, "HH:MM:SS", location, stream_id]`` for each decoded frame.

    Opens ``url`` with OpenCV and runs every frame through the model's
    'serving_default' signature. The capture device is always released,
    even when the consumer abandons the generator early (e.g. a Streamlit
    rerun), via the try/finally below.

    Args:
        url: video stream URL; its last path segment doubles as a stream id.
        model: loaded ``tf.saved_model`` with a 'serving_default' signature.
        k: forwarded to :func:`get_top_k`.
        label_map: class labels aligned with the classifier head.
        locations: pool of display locations sampled per prediction.
    """
    cap = cv2.VideoCapture(url)
    start_time = time.time()  # Capture the start time
    try:
        while True:
            ret, frame = cap.read()
            if not ret:  # stream ended or read failure
                break

            preprocessed_frame = preprocess_frame(frame)
            # NOTE(review): tf.newaxis on top of the batch dim already added by
            # preprocess_frame yields shape (1, 1, 224, 224, 3) — presumably a
            # (batch, frames, H, W, C) video-model input; confirm against the
            # SavedModel's expected signature.
            outputs = model.signatures['serving_default'](
                image=preprocessed_frame[tf.newaxis])
            probs = tf.nn.softmax(outputs['classifier_head_1'])

            # Wall-clock time since the stream was opened, as HH:MM:SS parts.
            elapsed = time.time() - start_time
            m, s = divmod(elapsed, 60)
            h, m = divmod(m, 60)

            # The last URL path segment serves as the stream identifier.
            ip_address = url.split("/")[-1]

            yield [
                get_top_k(probs[0], k=k, label_map=label_map),
                f"{int(h):02d}:{int(m):02d}:{int(s):02d}",
                random.choice(locations),
                ip_address,
            ]
    finally:
        # Release the video stream even if the generator is closed early.
        cap.release()


# Placeholder that is overwritten with each new prediction record.
output_placeholder = st.empty()

# Stream predictions into the placeholder once a URL has been entered.
if url:
    for output in perform_action_recognition(url, model):
        output_placeholder.json(output)