# Hugging Face Spaces demo app: "Shark Patrol" shark detector.
# (Replaced scraper residue header "Spaces: Runtime error" with a valid comment.)
# --- Environment setup -------------------------------------------------
# On Hugging Face Spaces (SYSTEM == 'spaces') the heavy dependencies are
# installed at startup instead of being baked into the image.
import os
import subprocess

if os.getenv('SYSTEM') == 'spaces':
    # Commands are passed as argument lists (shell=False), so tokens such as
    # 'mmcv>=2.0.0' are literal arguments, not shell redirections.
    _INSTALL_CMDS = [
        'pip install -U openmim',
        'pip install python-dotenv',
        'pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu113',
        'mim install mmcv>=2.0.0',
        'mim install mmengine',
        'mim install mmdet',
        'pip install opencv-python-headless==4.5.5.64',
        'pip install git+https://github.com/cocodataset/panopticapi.git',
    ]
    for _cmd in _INSTALL_CMDS:
        subprocess.call(_cmd.split())

import cv2
import dotenv

# Load credentials (SHARK_USERNAME / SHARK_PASSWORD) from a local .env file.
dotenv.load_dotenv()

import numpy as np
import gradio as gr  # was imported twice in the original; once is enough
from inference import inference_frame
import pathlib
# NOTE: duplicate `import os` / `import gradio` lines from the original were
# removed; `os` is already imported above.
def analize_video(x):
    """Run shark-detection inference on every frame of a video.

    Each frame is passed through ``inference_frame`` and written as a
    zero-padded PNG under a fresh numbered directory in ``/tmp/test/``; the
    PNGs are then stitched into an H.264 mp4 with ffmpeg.

    Args:
        x: Path to the input video file (as provided by the gradio Video input).

    Returns:
        Path to the processed ``.mp4`` file.
    """
    print(x)
    base = '/tmp/test/'
    os.makedirs(base, exist_ok=True)
    # Number of existing entries picks a fresh run directory / output name.
    run_id = len(os.listdir(base))
    path = f'{base}{run_id}'
    os.makedirs(path, exist_ok=True)
    outname = f'{path}_processed.mp4'
    if os.path.exists(outname):
        print('video already processed')
        return outname

    cap = cv2.VideoCapture(x)
    try:
        counter = 0
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            frame = inference_frame(frame)
            # Zero-padded names keep glob ordering identical to frame order.
            name = os.path.join(path, f'{counter:05d}.png')
            cv2.imwrite(name, frame)
            counter += 1
    finally:
        # BUGFIX: the original never released the capture handle, leaking the
        # underlying file/codec resources on every call.
        cap.release()

    print(path)
    # NOTE(review): `path`/`outname` come from an internal counter, not user
    # input, so the shell interpolation below is not attacker-controlled.
    os.system(
        f"ffmpeg -framerate 20 -pattern_type glob -i '{path}/*.png' "
        f"-c:v libx264 -pix_fmt yuv420p {outname} -y"
    )
    return outname
def set_example_image(example: list) -> dict:
    """Copy the clicked dataset sample into the video-input component.

    ``example`` is the selected Dataset row; its first (and only) element is
    the sample video path.
    """
    selected = example[0]
    return gr.Video.update(value=selected)
# --- Gradio UI ---------------------------------------------------------
# NOTE(review): `live=True` is not a documented gr.Blocks keyword and
# `width=` on launch() / `source=` on gr.Video are version-sensitive —
# if the Space shows "Runtime error" on a newer gradio, verify these
# against the pinned gradio version.
with gr.Blocks(title='Shark Patrol',theme=gr.themes.Soft(),live=True,) as demo:
    gr.Markdown("Initial DEMO.")
    with gr.Tab("Shark Detector"):
        with gr.Row():
            # Upload widget; audio is dropped since inference is frame-based.
            video_input = gr.Video(source='upload',include_audio=False)
            #video_input.style(witdh='50%',height='50%')
            video_output = gr.Video()
            #video_output.style(witdh='50%',height='50%')
        video_button = gr.Button("Analyze")
        with gr.Row():
            # Bundled example clips, in deterministic (sorted) order.
            paths = sorted(pathlib.Path('videos_example').rglob('*.mp4'))
            example_images = gr.Dataset(components=[video_input],
                                        samples=[[path.as_posix()]
                                                 for path in paths])
    with gr.Accordion("Open for More!"):
        gr.Markdown("Place holder for detection")
    # Wire events: the button runs inference on the uploaded video; clicking
    # a dataset sample copies it into the input component.
    video_button.click(analize_video, inputs=video_input, outputs=video_output)
    example_images.click(fn=set_example_image,
                         inputs=example_images,
                         outputs=video_input)

demo.queue()
#if os.getenv('SYSTEM') == 'spaces':
# NOTE(review): if SHARK_USERNAME/SHARK_PASSWORD are unset this passes
# auth=(None, None) — confirm the deployment always sets both env vars.
demo.launch(width='40%',auth=(os.environ.get('SHARK_USERNAME'), os.environ.get('SHARK_PASSWORD')))