File size: 3,032 Bytes
cd0d6f2
8c6ecf8
 
eddda5a
7576d10
b73d81d
cd0d6f2
5bbee66
6e8c2ef
 
4d701c0
eddda5a
e213266
7576d10
8c6ecf8
 
 
8353801
cd0d6f2
 
8353801
 
02cdb95
021ea63
5636b5c
 
8353801
cd0d6f2
5636b5c
f3a075d
02cdb95
f3a075d
8c6ecf8
 
021ea63
8c6ecf8
 
021ea63
8c6ecf8
 
 
 
 
 
 
5636b5c
8c6ecf8
 
5636b5c
8c6ecf8
5636b5c
8c6ecf8
 
5636b5c
8c6ecf8
 
 
 
 
 
 
 
 
 
5636b5c
8c6ecf8
 
5636b5c
8c6ecf8
 
 
 
5636b5c
8c6ecf8
 
 
7576d10
8c6ecf8
 
 
 
 
780307f
8c6ecf8
7576d10
 
8c6ecf8
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87

import subprocess
import os  
# On Hugging Face Spaces, install the heavyweight runtime dependencies at
# startup (they are not part of the base image). Exact commands and order
# are preserved; failures are non-fatal (subprocess.call, no check).
if os.getenv('SYSTEM') == 'spaces':
    _setup_commands = [
        'pip install -U openmim',
        'pip install python-dotenv',
        'pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu113',
        'mim install mmcv>=2.0.0',
        'mim install mmengine',
        'mim install mmdet',
        'pip install opencv-python-headless==4.5.5.64',
        'pip install git+https://github.com/cocodataset/panopticapi.git',
    ]
    for _cmd in _setup_commands:
        subprocess.call(_cmd.split())

import gradio as gr

from huggingface_hub import snapshot_download
import cv2 
import dotenv 
dotenv.load_dotenv()
import numpy as np
import gradio as gr
import glob
from inference import inference_frame,inference_frame_serial
from inference import inference_frame_par_ready
from inference import process_frame
import os
import pathlib
import multiprocessing as mp
from time import time


# Dataset repo holding the demo videos; downloaded once at startup into
# ./videos_example. Auth token is read from the SHARK_MODEL env var
# (populated via dotenv.load_dotenv() above or a Spaces secret).
REPO_ID='SharkSpace/videos_examples'
snapshot_download(repo_id=REPO_ID, token=os.environ.get('SHARK_MODEL'),repo_type='dataset',local_dir='videos_example')

def process_video(input_video, out_fps = 'auto', skip_frames = 7):
    """Run inference on every `skip_frames`-th frame and stream the results.

    Generator suitable for a Gradio streaming callback. While processing it
    yields ``(processed_rgb, original_rgb, None)`` for the live previews;
    when the input is exhausted it writes nothing further and yields a final
    ``(None, None, output_path)`` pointing at the assembled output video.

    Args:
        input_video: Path to the source video file.
        out_fps: Output frame rate. An int fixes the rate; 'auto' (default)
            derives it from the source fps divided by `skip_frames`, so the
            skipped output plays back at roughly real-time speed.
        skip_frames: Keep only every n-th frame (default 7).
    """
    cap = cv2.VideoCapture(input_video)

    output_path = "output.mp4"
    # Resolve writer fps: an explicit int wins; otherwise fall back to the
    # source fps, scaled down when 'auto' was requested.
    if out_fps != 'auto' and isinstance(out_fps, int):
        fps = out_fps
    else:
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        if out_fps == 'auto':
            fps = int(fps / skip_frames)

    width  = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    video = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))

    iterating, frame = cap.read()
    cnt = 0

    try:
        while iterating:
            if (cnt % skip_frames) == 0:
                display_frame = inference_frame_serial(frame)
                # Convert once and reuse for both the written file and the
                # live preview (was converted twice per frame).
                # NOTE(review): cv2.VideoWriter normally expects BGR; writing
                # the RGB-converted frame swaps channels in the saved file —
                # confirm this is intentional.
                display_rgb = cv2.cvtColor(display_frame, cv2.COLOR_BGR2RGB)
                video.write(display_rgb)
                print('sending frame')
                print(cnt)
                yield display_rgb, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), None
            cnt += 1
            iterating, frame = cap.read()
    finally:
        # Release OS video handles even if the consumer abandons the
        # generator mid-stream (original leaked `cap`, and skipped
        # `video.release()` on early close).
        cap.release()
        video.release()

    yield None, None, output_path

# ---- Gradio UI: wire the streaming processor to the widgets ----------------
with gr.Blocks() as demo:
    with gr.Row():
        input_video = gr.Video(label="Input")
        output_video = gr.Video(label="Output Video")

    with gr.Row():
        processed_frames = gr.Image(label="Live Frame")
        original_frames = gr.Image(label="Original Frame")

    with gr.Row():
        # Offer the downloaded sample clips (raw ones only) as examples.
        sample_paths = sorted(pathlib.Path('videos_example/').rglob('*.mp4'))
        sample_rows = [[p.as_posix()] for p in sample_paths if 'raw_videos' in str(p)]
        examples = gr.Examples(sample_rows, inputs=input_video)
        process_video_btn = gr.Button("Process Video")

    # process_video is a generator, so the outputs update as frames arrive.
    process_video_btn.click(process_video, input_video, [processed_frames, original_frames, output_video])

demo.queue()
demo.launch()