"""
Tests ability to load video in streamlit under the following conditions:
- In-memory
- From OpenCV object
"""
import io

import av
import cv2
import numpy as np
import pandas as pd
import streamlit as st
from tqdm import tqdm

n_frames = 100  # Number of frames to encode (kept small for testing).
width, height, fps = 192, 108, 10  # Video resolution and framerate.


def make_sample_image(i, width=width, height=height):
    """Build a synthetic "raw BGR" test frame showing the 1-based frame number.

    Args:
        i: Zero-based frame index; the digit rendered is ``i + 1``.
        width: Frame width in pixels (defaults to the module-level setting).
        height: Frame height in pixels (defaults to the module-level setting).

    Returns:
        ``np.ndarray`` of shape ``(height, width, 3)``, dtype ``uint8``,
        BGR channel order (OpenCV convention).
    """
    p = width // 60  # Scale factor so text size tracks the resolution.
    img = np.full((height, width, 3), 60, np.uint8)  # Dark-gray background.
    cv2.putText(
        img,
        str(i + 1),
        # Roughly center the number, shifting left by ~10*p px per digit.
        (width // 2 - p * 10 * len(str(i + 1)), height // 2 + p * 10),
        cv2.FONT_HERSHEY_DUPLEX,
        p,
        (255, 30, 30),  # Blue number (BGR order).
        p * 2,
    )
    return img


# --- Encode the synthetic frames into an in-memory MP4 file. ---
output_memory_file = io.BytesIO()  # In-memory "file" that receives the MP4 bytes.
output = av.open(
    output_memory_file, "w", format="mp4"
)  # Open the BytesIO as an MP4 muxing context.

# Add an H.264 video stream with framerate = fps. yuv420p is used because
# yuv444p is not universally supported (e.g. it doesn't work on mac), even
# though it would offer better quality.
stream = output.add_stream("h264", str(fps))
stream.width = width
stream.height = height
stream.pix_fmt = "yuv420p"
stream.options = {"crf": "17"}  # Low CRF -> high quality (price: larger file).

# Iterate the created images, encode and write to the MP4 memory file.
for i in tqdm(range(n_frames)):
    img = make_sample_image(i)  # OpenCV-style BGR frame (width x height).
    frame = av.VideoFrame.from_ndarray(img, format="bgr24")  # NumPy -> av frame.
    packet = stream.encode(frame)  # Encoder may emit zero or more packets.
    output.mux(packet)  # "Mux" the encoded packet(s) into the MP4 container.

print("OUTPUT TYPE", type(output))

# Flush frames still buffered inside the encoder, then finalize the container.
packet = stream.encode(None)
output.mux(packet)
output.close()

output_memory_file.seek(0)  # Rewind so playback starts reading from byte 0.
# video_bytes = output_memory_file.read()  # Convert BytesIO to bytes array
st.video(output_memory_file)  # Streamlit accepts a file-like object directly.