# shark_detection/app.py
import gradio as gr
import os
import subprocess
from huggingface_hub import snapshot_download
REPO_ID = 'SharkSpace/videos_examples'
# Download the example-video dataset from the Hugging Face Hub (cached on re-runs).
snapshot_download(repo_id=REPO_ID, token=os.environ.get('SHARK_MODEL'),
                  repo_type='dataset', local_dir='videos_example')
# On Hugging Face Spaces, install the detection stack at startup.
if os.getenv('SYSTEM') == 'spaces':
    subprocess.call('pip install -U openmim'.split())
    subprocess.call('pip install python-dotenv'.split())
    subprocess.call('pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu113'.split())
    subprocess.call('mim install mmcv>=2.0.0'.split())
    subprocess.call('mim install mmengine'.split())
    subprocess.call('mim install mmdet'.split())
    subprocess.call('pip install opencv-python-headless==4.5.5.64'.split())
    subprocess.call('pip install git+https://github.com/cocodataset/panopticapi.git'.split())
import cv2
import dotenv

dotenv.load_dotenv()

import numpy as np
import pathlib
from time import time

from inference import inference_frame
def analyze_video(x, skip_frames=5, frame_rate_out=8):
    print(x)
    # Directory that collects the processed frames
    path = '/tmp/test/'
    os.makedirs(path, exist_ok=True)
    # Name the current video after the number of entries already in the path
    n_videos_in_path = len(os.listdir(path))
    path = f'{path}{n_videos_in_path}'
    os.makedirs(path, exist_ok=True)
    # Name of the output video
    outname = f'{path}_processed.mp4'
    if os.path.exists(outname):
        print('video already processed')
        return outname

    cap = cv2.VideoCapture(x)
    counter = 0
    while cap.isOpened():
        start = time()
        ret, frame = cap.read()
        print(f'read time: {time() - start}')
        if ret:
            # Only run inference on every skip_frames-th frame
            if counter % skip_frames == 0:
                name = os.path.join(path, f'{counter:05d}.png')
                start = time()
                frame = inference_frame(frame)
                print(f'inference time: {time() - start}')
                # Write the annotated frame to disk
                start = time()
                cv2.imwrite(name, frame)
                print(f'write time: {time() - start}')
            print(counter)
            counter += 1
        else:
            break
    # Release the capture once the job is finished
    cap.release()

    # Assemble the annotated frames into a video
    print(path)
    os.system(f'''ffmpeg -framerate {frame_rate_out} -pattern_type glob -i '{path}/*.png' -c:v libx264 -pix_fmt yuv420p {outname} -y''')
    return outname
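# A minimal sketch of a cheaper skipping strategy (hypothetical, not wired into
# the app): cv2.VideoCapture.grab() advances the stream without decoding, so
# skipped frames never pay the full read() cost. Assumes the same
# inference_frame interface as above.
def analyze_video_grab_sketch(x, skip_frames=5):
    cap = cv2.VideoCapture(x)
    frames = []
    counter = 0
    while cap.isOpened():
        if counter % skip_frames == 0:
            ret, frame = cap.read()  # decode only the frames we keep
            if not ret:
                break
            frames.append(inference_frame(frame))
        elif not cap.grab():  # advance one frame without decoding
            break
        counter += 1
    cap.release()
    return frames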
def set_example_image(example: list) -> dict:
    return gr.Video.update(value=example[0])


def show_video(example: list) -> dict:
    return gr.Video.update(value=example[0])
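# Hypothetical helper (a sketch, not used by the UI above): derive
# frame_rate_out from the source clip's FPS so that, after frame skipping,
# the processed video plays back at roughly real-time speed.
def output_frame_rate(video_path, skip_frames=5, fallback=8):
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS)  # 0.0 when the backend cannot report FPS
    cap.release()
    return max(1, round(fps / skip_frames)) if fps > 0 else fallback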
with gr.Blocks(title='Shark Patrol', theme=gr.themes.Soft()) as demo:
    gr.Markdown("Alpha Demo of the Sharkpatrol Oceanlife Detector.")
    with gr.Tab("Preloaded Examples"):
        with gr.Row():
            video_example = gr.Video(source='upload', include_audio=False, stream=True)
        with gr.Row():
            paths = sorted(pathlib.Path('videos_example/').rglob('*rgb.mp4'))
            example_preds = gr.Dataset(components=[video_example],
                                       samples=[[path.as_posix()] for path in paths])
            example_preds.click(fn=show_video,
                                inputs=example_preds,
                                outputs=video_example)

    with gr.Tab("Test your own Video"):
        with gr.Row():
            video_input = gr.Video(source='upload', include_audio=False)
            # video_input.style(width='50%', height='50%')
            video_output = gr.Video()
            # video_output.style(width='50%', height='50%')
            video_button = gr.Button("Analyze your Video")
        with gr.Row():
            paths = sorted(pathlib.Path('videos_example/').rglob('*.mp4'))
            example_images = gr.Dataset(components=[video_input],
                                        samples=[[path.as_posix()]
                                                 for path in paths if 'videos_side_by_side' not in str(path)])
        video_button.click(analyze_video, inputs=video_input, outputs=video_output)
        example_images.click(fn=set_example_image,
                             inputs=example_images,
                             outputs=video_input)

demo.queue()
# if os.getenv('SYSTEM') == 'spaces':
demo.launch(width='40%', auth=(os.environ.get('SHARK_USERNAME'), os.environ.get('SHARK_PASSWORD')))