hysts HF staff committed on
Commit
4c44f87
1 Parent(s): c4715ed
Files changed (2)
  1. app.py +4 -1
  2. requirements.txt +2 -2
app.py CHANGED
@@ -28,6 +28,9 @@ DESCRIPTION = '# [Tune-A-Video](https://tuneavideo.github.io/)'
 if not torch.cuda.is_available():
     DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
 
+CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv(
+    'CACHE_EXAMPLES') == '1'
+
 HF_TOKEN = os.getenv('HF_TOKEN')
 pipe = InferencePipeline(HF_TOKEN)
 app = InferenceUtil(HF_TOKEN)
@@ -198,7 +201,7 @@ with gr.Blocks(css='style.css') as demo:
                 ],
                 outputs=result,
                 fn=pipe.run,
-                cache_examples=os.getenv('CACHE_EXAMPLES') == '1')
+                cache_examples=CACHE_EXAMPLES)
 
     model_id.change(fn=app.load_model_info,
                     inputs=model_id,
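For reference, a minimal standalone sketch of the pattern this commit adopts: Gradio example caching is enabled only when CUDA is available and the CACHE_EXAMPLES environment variable is set to '1'. The demo layout, example prompt, and run function below are placeholders for illustration, not the Space's actual pipeline.

import os

import gradio as gr
import torch

# Cache examples only on GPU and only when explicitly requested,
# mirroring the CACHE_EXAMPLES flag introduced in this commit.
CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv('CACHE_EXAMPLES') == '1'


def run(prompt: str) -> str:
    # Placeholder for pipe.run: just echoes the prompt.
    return prompt


with gr.Blocks() as demo:
    prompt = gr.Textbox(label='Prompt')
    result = gr.Textbox(label='Result')
    gr.Examples(examples=[['a panda is surfing']],
                inputs=prompt,
                outputs=result,
                fn=run,
                cache_examples=CACHE_EXAMPLES)

demo.queue().launch()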
requirements.txt CHANGED
@@ -4,8 +4,8 @@ decord==0.6.0
 diffusers[torch]==0.11.1
 einops==0.6.1
 ftfy==6.1.1
-gradio==3.34.0
-huggingface-hub==0.15.1
+gradio==3.36.1
+huggingface-hub==0.16.4
 imageio==2.31.0
 imageio-ffmpeg==0.4.8
 omegaconf==2.3.0