Commit c4715ed committed by hysts (HF staff)
Parent: 72b4cee
Files changed (3)
  1. README.md +1 -0
  2. app.py +7 -3
  3. requirements.txt +11 -11
README.md CHANGED
@@ -7,6 +7,7 @@ sdk: docker
 pinned: false
 license: mit
 duplicated_from: Tune-A-Video-library/Tune-A-Video-Training-UI
+suggested_hardware: t4-small
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
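The only change here is the new `suggested_hardware` field. Per the Spaces configuration reference linked in the README, this field pre-selects a hardware tier when someone duplicates the Space (it does not upgrade the Space itself); `t4-small` is the single NVIDIA T4 GPU tier, which matches the GPU-only behaviour made explicit in app.py below. With the change applied, the part of the metadata block visible in this hunk reads as follows (fields above line 7, such as the title, are not shown; `sdk: docker` comes from the hunk header):

sdk: docker
pinned: false
license: mit
duplicated_from: Tune-A-Video-library/Tune-A-Video-Training-UI
suggested_hardware: t4-small
---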
app.py CHANGED
@@ -5,6 +5,7 @@ from __future__ import annotations
 import os
 
 import gradio as gr
+import torch
 
 from inference import InferencePipeline
 
@@ -23,13 +24,16 @@ class InferenceUtil:
         return base_model, training_prompt
 
 
-TITLE = '# [Tune-A-Video](https://tuneavideo.github.io/)'
+DESCRIPTION = '# [Tune-A-Video](https://tuneavideo.github.io/)'
+if not torch.cuda.is_available():
+    DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
+
 HF_TOKEN = os.getenv('HF_TOKEN')
 pipe = InferencePipeline(HF_TOKEN)
 app = InferenceUtil(HF_TOKEN)
 
 with gr.Blocks(css='style.css') as demo:
-    gr.Markdown(TITLE)
+    gr.Markdown(DESCRIPTION)
 
     with gr.Row():
         with gr.Column():
@@ -194,7 +198,7 @@ with gr.Blocks(css='style.css') as demo:
                     ],
                     outputs=result,
                     fn=pipe.run,
-                    cache_examples=os.getenv('SYSTEM') == 'spaces')
+                    cache_examples=os.getenv('CACHE_EXAMPLES') == '1')
 
     model_id.change(fn=app.load_model_info,
                     inputs=model_id,
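Two patterns in this change are worth a note: the page header is now a DESCRIPTION string that gains a visible warning when no CUDA device is present, and example caching switches from the implicit SYSTEM == 'spaces' check to an explicit CACHE_EXAMPLES environment variable, so the startup cost of running every example can be controlled independently of where the app runs. Below is a minimal sketch of both patterns in a standalone Gradio Blocks app; the generate function and the example prompt are hypothetical placeholders, not code from this repository.

import os

import gradio as gr
import torch

DESCRIPTION = '# Demo title'
if not torch.cuda.is_available():
    # Warn visitors explicitly instead of failing later when the Space runs on CPU hardware.
    DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'


def generate(prompt: str) -> str:
    # Hypothetical stand-in for the real inference pipeline.
    return prompt.upper()


with gr.Blocks() as demo:
    gr.Markdown(DESCRIPTION)
    prompt = gr.Textbox(label='Prompt')
    result = gr.Textbox(label='Result')
    gr.Examples(examples=[['a panda surfing']],
                inputs=prompt,
                outputs=result,
                fn=generate,
                # Caching runs every example once at startup, which needs working inference;
                # gate it behind an explicit env var so CPU or local runs can skip it.
                cache_examples=os.getenv('CACHE_EXAMPLES') == '1')
    prompt.submit(fn=generate, inputs=prompt, outputs=result)

demo.queue().launch()

Launching with CACHE_EXAMPLES=1 pre-computes the example outputs at startup; leaving it unset (for instance on CPU hardware, where this demo cannot run) skips caching.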
requirements.txt CHANGED
@@ -1,19 +1,19 @@
-accelerate==0.15.0
-bitsandbytes==0.35.4
+accelerate==0.20.3
+bitsandbytes==0.39.0
 decord==0.6.0
 diffusers[torch]==0.11.1
-einops==0.6.0
+einops==0.6.1
 ftfy==6.1.1
-gradio==3.18.0
-huggingface-hub==0.12.0
-imageio==2.25.0
+gradio==3.34.0
+huggingface-hub==0.15.1
+imageio==2.31.0
 imageio-ffmpeg==0.4.8
 omegaconf==2.3.0
-Pillow==9.4.0
-python-slugify==7.0.0
-tensorboard==2.11.2
+Pillow==9.5.0
+python-slugify==8.0.1
+tensorboard==2.12.3
 torch==1.13.1
 torchvision==0.14.1
-transformers==4.26.0
-triton==2.0.0.dev20221202
+transformers==4.30.2
+triton==2.0.0
 xformers==0.0.16