RamAnanth1 commited on
Commit
a92b0d1
•
1 Parent(s): 21c2481

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -2
app.py CHANGED
@@ -56,7 +56,7 @@ def sample_denoising_batch(model, noise_shape, condition, *args,
56
  @torch.no_grad()
57
  def sample_text2video(model, prompt, n_samples, batch_size,
58
  sample_type="ddim", sampler=None,
59
- ddim_steps=50, eta=1.0, cfg_scale=15.0,
60
  decode_frame_bs=1,
61
  ddp=False, all_gather=True,
62
  batch_progress=True, show_denoising_progress=False,
@@ -109,12 +109,22 @@ def save_results(videos,
109
  return os.path.join(save_subdir, f"{save_name}_{i:03d}.mp4")
110
 
111
  def get_video(prompt):
 
 
112
  samples = sample_text2video(model, prompt, n_samples = 1, batch_size = 1,
113
  sampler=ddim_sampler,
114
  )
115
  return save_results(samples)
116
 
 
 
 
117
  prompt_inp = gr.Textbox(label = "Prompt")
118
  result = gr.Video(label='Result')
119
- iface = gr.Interface(fn=get_video, inputs=[prompt_inp], outputs=[result])
 
 
 
 
 
120
  iface.launch()
 
56
  @torch.no_grad()
57
  def sample_text2video(model, prompt, n_samples, batch_size,
58
  sample_type="ddim", sampler=None,
59
+ ddim_steps=50, eta=1.0, cfg_scale=7.5,
60
  decode_frame_bs=1,
61
  ddp=False, all_gather=True,
62
  batch_progress=True, show_denoising_progress=False,
 
109
  return os.path.join(save_subdir, f"{save_name}_{i:03d}.mp4")
110
 
111
  def get_video(prompt):
112
+ seed = 1000
113
+ seed_everything(seed)
114
  samples = sample_text2video(model, prompt, n_samples = 1, batch_size = 1,
115
  sampler=ddim_sampler,
116
  )
117
  return save_results(samples)
118
 
119
+ title = 'Latent Video Diffusion Models'
120
+ DESCRIPTION = '<p>This model can only be used for non-commercial purposes. To learn more about the model, take a look at the <a href="https://github.com/VideoCrafter/VideoCrafter" style="text-decoration: underline;" target="_blank">model card</a>.</p>'
121
+
122
  prompt_inp = gr.Textbox(label = "Prompt")
123
  result = gr.Video(label='Result')
124
+ iface = gr.Interface(fn=get_video,
125
+ inputs=[prompt_inp],
126
+ outputs=[result],
127
+ title = title,
128
+ description = DESCRIPTION,
129
+ examples = [["An astronaut riding a horse"]])
130
  iface.launch()