SakuraD committed
Commit aeed542
1 Parent(s): 2e6f85e
Files changed (3)
  1. README.md +1 -1
  2. __pycache__/transforms.cpython-310.pyc +0 -0
  3. app.py +33 -27
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🐍
 colorFrom: blue
 colorTo: green
 sdk: gradio
-sdk_version: 3.29.0
+sdk_version: 4.21.0
 app_file: app.py
 pinned: false
 license: apache-2.0
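
Note: `sdk_version` in the Space front matter pins the Gradio release the Space is built with, so this one-line bump is a major-version upgrade (3.x to 4.x). It is what drives the API adjustments in app.py below.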
__pycache__/transforms.cpython-310.pyc CHANGED
Binary files a/__pycache__/transforms.cpython-310.pyc and b/__pycache__/transforms.cpython-310.pyc differ
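
Note: `__pycache__` bytecode files are build artifacts; committing them produces noisy binary diffs like this one, and they are normally kept out of the repo with a `__pycache__/` entry in `.gitignore`.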
 
app.py CHANGED
@@ -1,8 +1,13 @@
 import os
+import spaces
 
 # install packages for mamba
-print("Install personal packages", flush=True)
-os.system("bash install.sh")
+@spaces.GPU
+def install():
+    print("Install personal packages", flush=True)
+    os.system("bash install.sh")
+
+install()
 
 import torch
 import torch.nn as nn
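
Note: `@spaces.GPU` comes from the `spaces` package that Hugging Face injects into ZeroGPU Spaces; a decorated function is granted a GPU slice only for the duration of the call. Wrapping the `install.sh` step this way presumably lets the mamba CUDA kernels build while a GPU is actually attached. The same decorator is applied to `inference_video` and `inference_image` in the hunks below. A minimal sketch of the pattern (the function name here is illustrative, not from this repo):

    import spaces
    import torch

    @spaces.GPU  # acquire a ZeroGPU slice only while this function runs
    def gpu_probe():
        # any CUDA work must happen inside a decorated call
        return torch.zeros(1).to("cuda").device

    print(gpu_probe())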
@@ -25,7 +30,6 @@ import gradio as gr
 from huggingface_hub import hf_hub_download
 
 
-
 # Device on which to run the model
 # Set to cuda to load on GPU
 device = "cuda"
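
Note: `device = "cuda"` stays at module scope even though ZeroGPU only attaches a GPU inside `@spaces.GPU` calls; my understanding is that ZeroGPU intercepts top-level `.to("cuda")` moves and replays them once a GPU is acquired, so this pattern works, but it is worth verifying against the ZeroGPU docs.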
@@ -87,6 +91,7 @@ def load_video(video_path):
     return torch_imgs
 
 
+@spaces.GPU
 def inference_video(video):
     vid = load_video(video)
 
@@ -105,6 +110,7 @@ def set_example_video(example: list) -> dict:
     return gr.Video.update(value=example[0])
 
 
+@spaces.GPU
 def inference_image(img):
     image = img
     image_transform = T.Compose(
@@ -141,30 +147,30 @@ with demo:
     )
 
     with gr.Tab("Video"):
-        with gr.Box():
-            with gr.Row():
-                with gr.Column():
-                    with gr.Row():
-                        input_video = gr.Video(label='Input Video').style(height=360)
-                    with gr.Row():
-                        submit_video_button = gr.Button('Submit')
-                with gr.Column():
-                    label_video = gr.Label(num_top_classes=5)
-            with gr.Row():
-                example_videos = gr.Dataset(components=[input_video], samples=[['./videos/hitting_baseball.mp4'], ['./videos/hoverboarding.mp4'], ['./videos/yoga.mp4']])
+        # with gr.Box():
+        with gr.Row():
+            with gr.Column():
+                with gr.Row():
+                    input_video = gr.Video(label='Input Video').style(height=360)
+                with gr.Row():
+                    submit_video_button = gr.Button('Submit')
+            with gr.Column():
+                label_video = gr.Label(num_top_classes=5)
+        with gr.Row():
+            example_videos = gr.Dataset(components=[input_video], samples=[['./videos/hitting_baseball.mp4'], ['./videos/hoverboarding.mp4'], ['./videos/yoga.mp4']])
 
     with gr.Tab("Image"):
-        with gr.Box():
-            with gr.Row():
-                with gr.Column():
-                    with gr.Row():
-                        input_image = gr.Image(label='Input Image', type='pil').style(height=360)
-                    with gr.Row():
-                        submit_image_button = gr.Button('Submit')
-                with gr.Column():
-                    label_image = gr.Label(num_top_classes=5)
-            with gr.Row():
-                example_images = gr.Dataset(components=[input_image], samples=[['./images/cat.png'], ['./images/dog.png'], ['./images/panda.png']])
+        # with gr.Box():
+        with gr.Row():
+            with gr.Column():
+                with gr.Row():
+                    input_image = gr.Image(label='Input Image', type='pil').style(height=360)
+                with gr.Row():
+                    submit_image_button = gr.Button('Submit')
+            with gr.Column():
+                label_image = gr.Label(num_top_classes=5)
+        with gr.Row():
+            example_images = gr.Dataset(components=[input_image], samples=[['./images/cat.png'], ['./images/dog.png'], ['./images/panda.png']])
 
     gr.Markdown(
         """
@@ -173,9 +179,9 @@
     )
 
     submit_video_button.click(fn=inference_video, inputs=input_video, outputs=label_video)
-    example_videos.click(fn=set_example_video, inputs=example_videos, outputs=example_videos.components)
+    example_videos.click(fn=set_example_video, inputs=example_videos, outputs=example_videos._components)
     submit_image_button.click(fn=inference_image, inputs=input_image, outputs=label_image)
-    example_images.click(fn=set_example_image, inputs=example_images, outputs=example_images.components)
+    example_images.click(fn=set_example_image, inputs=example_images, outputs=example_images._components)
 
 demo.launch(enable_queue=True)
 # demo.launch(server_name="0.0.0.0", server_port=10034, enable_queue=True)
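
Note: `_components` is a private attribute of `gr.Dataset`, so switching to it works around the public `components` attribute going away, but it may break again on upgrade. Two other 3.x idioms left in place also look incompatible with Gradio 4: `gr.Video.update(...)` (the per-component `update` classmethods were removed; 4.x returns `gr.update(...)` or a new component instead) and `launch(enable_queue=True)` (the flag was removed; queueing is configured with `demo.queue()`). A hedged sketch of the 4.x equivalents:

    def set_example_video(example: list) -> dict:
        # Gradio 4: return gr.update(...) instead of gr.Video.update(...)
        return gr.update(value=example[0])

    demo.queue()   # Gradio 4: enable_queue was removed from launch()
    demo.launch()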
 