nam_nguyenhoai_AI committed on
Commit 4fe0a37
1 Parent(s): 14c776e

Add application file

Files changed (2)
  1. .gitattributes +2 -0
  2. app.py +52 -0
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+ .git/assets/Jumps.mp4 filter=lfs diff=lfs merge=lfs -text
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,52 @@
+ import gradio as gr
+ import cv2
+ import numpy as np
+ from transformers import pipeline
+ import os
+ import torch
+ import torch.nn.functional as F
+ from torchvision import transforms
+ from torchvision.transforms import Compose
+ import spaces
+
+ title = "# Video Summarization using Timesformer Demo"
+ description = """ Author: Nguyen Hoai Nam."""
+ css = """
+ #img-display-container {
+     max-height: 100vh;
+ }
+ #img-display-input {
+     max-height: 80vh;
+ }
+ #img-display-output {
+     max-height: 80vh;
+ }
+ """
+
+ with gr.Blocks(css=css) as demo:
+     gr.Markdown(title)
+     gr.Markdown(description)
+     gr.Markdown("### Video Summarization demo")
+
+     with gr.Row():
+         input_video = gr.Video(label="Input Video")
+         model_type = gr.Dropdown(["K-means", "Sum of Squared Difference 01", "Sum of Squared Difference 02"], type="value", label='Model Type')
+     submit = gr.Button("Submit")
+     processed_video = gr.Video(label="Summarized Video")
+
+     def on_submit(uploaded_video, model_type):
+         pass
+         # Process the video and get the path of the output video
+         # output_video_path = make_video(uploaded_video, encoder=model_type)
+
+         # return output_video_path
+
+     submit.click(on_submit, inputs=[input_video, model_type], outputs=processed_video)
+
+     example_files = os.listdir('assets/examples_video')
+     example_files.sort()
+     example_files = [os.path.join('assets/examples_video', filename) for filename in example_files]
+     examples = gr.Examples(examples=example_files, inputs=[input_video], outputs=processed_video, fn=on_submit, cache_examples=True)
+
+ if __name__ == '__main__':
+     demo.queue().launch()
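
Note that this first version of `app.py` wires up the UI but leaves `on_submit` as a stub: `make_video` is only referenced in the commented-out lines and is not defined anywhere in the commit. As a rough, hypothetical sketch of what one of the dropdown options could map to, the snippet below scores frames by the sum of squared differences between consecutive frames and keeps the most-changed ones. The function name mirrors the commented-out call, but its signature (`keep_ratio`, `output_path`), the scoring details, and the `mp4v` codec choice are assumptions rather than part of this commit.

```python
import cv2
import numpy as np

def make_video(video_path, encoder="Sum of Squared Difference 01",
               keep_ratio=0.15, output_path="summary.mp4"):
    """Hypothetical summarizer: keep the frames that differ most from their
    predecessor (sum of squared differences) and re-encode them as a shorter clip.
    `encoder` would select between the K-means and SSD variants offered in the
    dropdown; this sketch only implements an SSD path."""
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 25.0  # fall back if FPS metadata is missing

    frames, scores = [], []
    prev_gray = None
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY).astype(np.float32)
        # Score each frame by how much it changed relative to the previous frame.
        scores.append(0.0 if prev_gray is None else float(np.sum((gray - prev_gray) ** 2)))
        frames.append(frame)  # buffered in memory for simplicity
        prev_gray = gray
    cap.release()

    if not frames:
        return None

    # Keep the top `keep_ratio` fraction of frames, restored to chronological order.
    k = max(1, int(len(frames) * keep_ratio))
    keep = sorted(np.argsort(scores)[-k:])

    h, w = frames[0].shape[:2]
    writer = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
    for i in keep:
        writer.write(frames[i])
    writer.release()
    return output_path
```

With a helper along these lines in place, `on_submit` could simply return `make_video(uploaded_video, encoder=model_type)`, as the commented-out lines already suggest.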