fffiloni commited on
Commit
93f9e0a
1 Parent(s): 1cf77c6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -5
app.py CHANGED
@@ -30,7 +30,7 @@ def get_frame_count(filepath):
30
 
31
  video.release()
32
 
33
- LIMITS
34
  if frame_count > 36 :
35
  frame_count = 36 # limit to 36 frames to avoid cuDNN errors
36
 
@@ -200,12 +200,13 @@ with gr.Blocks(css=css) as demo:
200
  with gr.Column(elem_id="col-container"):
201
  gr.Markdown("""
202
  <h1 style="text-align: center;">ControlVideo</h1>
 
203
  """)
204
  with gr.Row():
205
  with gr.Column():
206
  #video_in = gr.Video(source="upload", type="filepath", visible=True)
207
- video_path = gr.Video(source="upload", type="filepath", visible=True)
208
- prompt = gr.Textbox(label="prompt")
209
  with gr.Column():
210
  video_length = gr.Slider(label="Video length", info="How many frames do you want to process ? For demo purpose, max is set to 12", minimum=1, maximum=12, step=1, value=2)
211
  with gr.Row():
@@ -214,10 +215,10 @@ with gr.Blocks(css=css) as demo:
214
  inference_steps = gr.Slider(label="Inference steps", minimum=25, maximum=50, step=1, value=25)
215
  submit_btn = gr.Button("Submit")
216
  with gr.Column():
217
- video_res = gr.Video(label="result")
218
  status = gr.Textbox(label="result")
219
  gr.Examples(
220
- examples=[["Indiana Jones moonwalk in the Jungle", "./examples/moonwalk.mp4", 'depth', 12, 42, 50]],
221
  fn=run_inference,
222
  inputs=[prompt,
223
  video_path,
 
30
 
31
  video.release()
32
 
33
+ # LIMITS
34
  if frame_count > 36 :
35
  frame_count = 36 # limit to 36 frames to avoid cuDNN errors
36
 
 
200
  with gr.Column(elem_id="col-container"):
201
  gr.Markdown("""
202
  <h1 style="text-align: center;">ControlVideo</h1>
203
+ <p> Pytorch implementation of "ControlVideo: Training-free Controllable Text-to-Video Generation" </p>
204
  """)
205
  with gr.Row():
206
  with gr.Column():
207
  #video_in = gr.Video(source="upload", type="filepath", visible=True)
208
+ video_path = gr.Video(source="upload", type="filepath", visible=True, elem_id="video-in")
209
+ prompt = gr.Textbox(label="prompt", elem_id="prompt-in")
210
  with gr.Column():
211
  video_length = gr.Slider(label="Video length", info="How many frames do you want to process ? For demo purpose, max is set to 12", minimum=1, maximum=12, step=1, value=2)
212
  with gr.Row():
 
215
  inference_steps = gr.Slider(label="Inference steps", minimum=25, maximum=50, step=1, value=25)
216
  submit_btn = gr.Button("Submit")
217
  with gr.Column():
218
+ video_res = gr.Video(label="result", elem_id="video-out")
219
  status = gr.Textbox(label="result")
220
  gr.Examples(
221
+ examples=[["Indiana Jones moonwalk in the Jungle", "./examples/moonwalk.mp4", 'depth', 12, 424242, 50]],
222
  fn=run_inference,
223
  inputs=[prompt,
224
  video_path,