import gradio as gr
import cv2
import numpy as np
from transformers import pipeline
import os
import torch
import torch.nn.functional as F
from torchvision import transforms
from torchvision.transforms import Compose
import spaces

title = "# Video Summarization using Timesformer Demo"
description = """Author: Nguyen Hoai Nam."""
css = """
#img-display-container {
    max-height: 100vh;
}
#img-display-input {
    max-height: 80vh;
}
#img-display-output {
    max-height: 80vh;
}
"""

with gr.Blocks(css=css) as demo:
    gr.Markdown(title)
    gr.Markdown(description)
    gr.Markdown("### Video Summarization demo")

    with gr.Row():
        input_video = gr.Video(label="Input Video")
        model_type = gr.Dropdown(["K-means", "Sum of Squared Difference 01", "Sum of Squared Difference 02"], type="value", label='Model Type')
    submit = gr.Button("Submit")
    processed_video = gr.Video(label="Summarized Video")

    def on_submit(uploaded_video, model_type):
        # Placeholder: the summarization for the chosen "Model Type" is not implemented
        # here; the hedged sketches below show one possible shape of that logic.
        pass
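
    # The helpers below are minimal sketches of the two families of methods named in
    # the "Model Type" dropdown. They are illustrative assumptions, not the author's
    # implementation: the thresholds, histogram features, mp4v codec, and the
    # scikit-learn dependency of the K-means variant are all placeholder choices.
    def ssd_summarize(video_path, mse_threshold=100.0, output_path="summary_ssd.mp4"):
        """Keep a frame when its mean squared difference to the last kept frame exceeds a threshold."""
        cap = cv2.VideoCapture(video_path)
        fps = cap.get(cv2.CAP_PROP_FPS) or 25
        writer, prev_gray = None, None
        while True:
            ok, frame = cap.read()
            if not ok:
                break
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY).astype(np.float32)
            if prev_gray is None or np.mean((gray - prev_gray) ** 2) > mse_threshold:
                if writer is None:
                    h, w = frame.shape[:2]
                    writer = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
                writer.write(frame)
                prev_gray = gray
        cap.release()
        if writer is not None:
            writer.release()
            return output_path
        return None

    def kmeans_summarize(video_path, n_keyframes=10, output_path="summary_kmeans.mp4"):
        """Cluster colour histograms of the frames and keep one representative frame per cluster."""
        from sklearn.cluster import KMeans  # assumed to be available in the Space

        cap = cv2.VideoCapture(video_path)
        fps = cap.get(cv2.CAP_PROP_FPS) or 25
        frames, feats = [], []
        while True:
            ok, frame = cap.read()
            if not ok:
                break
            hist = cv2.calcHist([frame], [0, 1, 2], None, [8, 8, 8], [0, 256] * 3)
            feats.append(cv2.normalize(hist, hist).flatten())
            frames.append(frame)  # fine for short clips; stream instead for long videos
        cap.release()
        if not frames:
            return None
        k = min(n_keyframes, len(frames))
        labels = KMeans(n_clusters=k, n_init=10, random_state=0).fit_predict(np.stack(feats))
        keep, seen = [], set()
        for idx, label in enumerate(labels):  # first frame of each cluster, in temporal order
            if label not in seen:
                seen.add(label)
                keep.append(idx)
        h, w = frames[0].shape[:2]
        writer = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
        for idx in keep:
            writer.write(frames[idx])
        writer.release()
        return output_path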

    submit.click(on_submit, inputs=[input_video, model_type], outputs=processed_video)

    example_files = os.listdir('assets/examples_video')
    example_files.sort()
    example_files = [os.path.join('assets/examples_video', filename) for filename in example_files]
    examples = gr.Examples(examples=example_files, inputs=[input_video], outputs=processed_video, fn=on_submit, cache_examples=True)

if __name__ == '__main__':
    demo.queue().launch()