Ahsen Khaliq committed on
Commit
1f3e0d6
1 Parent(s): 8231f6c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -1
app.py CHANGED
@@ -202,6 +202,7 @@ def inference(text):
202
  for k, image in enumerate(sample['pred_xstart']):
203
  #filename = f'progress_{i * batch_size + k:05}.png'
204
  img = TF.to_pil_image(image.add(1).div(2).clamp(0, 1))
 
205
  tqdm.write(f'Batch {i}, step {j}, output {k}:')
206
  #display.display(display.Image(filename))
207
  writer = imageio.get_writer('video.mp4', fps=20)
@@ -212,7 +213,7 @@ def inference(text):
212
 
213
  title = "CLIP Guided Diffusion HQ"
214
  description = "Gradio demo for CLIP Guided Diffusion. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
215
- article = "<p style='text-align: center'>By Katherine Crowson (https://github.com/crowsonkb, https://twitter.com/RiversHaveWings). It uses OpenAI's 256x256 unconditional ImageNet diffusion model (https://github.com/openai/guided-diffusion) together with CLIP (https://github.com/openai/CLIP) to connect text prompts with images. | <a href='https://colab.research.google.com/drive/1ED6_MYVXTApBHzQObUPaaMolgf9hZOOF' target='_blank'>Colab</a></p>"
216
  iface = gr.Interface(inference, inputs="text", outputs=["image","video"], title=title, description=description, article=article, examples=[["coral reef city by artistation artists"]],
217
  enable_queue=True)
218
  iface.launch()
 
202
  for k, image in enumerate(sample['pred_xstart']):
203
  #filename = f'progress_{i * batch_size + k:05}.png'
204
  img = TF.to_pil_image(image.add(1).div(2).clamp(0, 1))
205
+ all_frames.append(img)
206
  tqdm.write(f'Batch {i}, step {j}, output {k}:')
207
  #display.display(display.Image(filename))
208
  writer = imageio.get_writer('video.mp4', fps=20)
 
213
 
214
  title = "CLIP Guided Diffusion HQ"
215
  description = "Gradio demo for CLIP Guided Diffusion. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
216
+ article = "<p style='text-align: center'> By Katherine Crowson (https://github.com/crowsonkb, https://twitter.com/RiversHaveWings). It uses OpenAI's 256x256 unconditional ImageNet diffusion model (https://github.com/openai/guided-diffusion) together with CLIP (https://github.com/openai/CLIP) to connect text prompts with images. | <a href='https://colab.research.google.com/drive/12a_Wrfi2_gwwAuN3VvMTwVMz9TfqctNj' target='_blank'>Colab</a></p>"
217
  iface = gr.Interface(inference, inputs="text", outputs=["image","video"], title=title, description=description, article=article, examples=[["coral reef city by artistation artists"]],
218
  enable_queue=True)
219
  iface.launch()