Ahsen Khaliq committed
Commit 3ab7435
1 Parent(s): e39fd98

Update app.py

Files changed (1)
  app.py  +13 -5
app.py CHANGED
@@ -24,6 +24,8 @@ sys.path.append('./CLIP')
 sys.path.append('./guided-diffusion')
 import clip
 from guided_diffusion.script_util import create_model_and_diffusion, model_and_diffusion_defaults
+import numpy as np
+import imageio
 # Model settings
 model_config = model_and_diffusion_defaults()
 model_config.update({
@@ -60,6 +62,7 @@ def spherical_dist_loss(x, y):
     return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
 
 def inference(text):
+    all_frames = []
     prompt = text
     batch_size = 1
     clip_guidance_scale = 2750
@@ -103,19 +106,24 @@ def inference(text):
 
     for i, sample in enumerate(samples):
         cur_t -= 1
-        if i % 100 == 0 or cur_t == -1:
+        if i % 1 == 0 or cur_t == -1:
             print()
             for j, image in enumerate(sample['pred_xstart']):
-                filename = f'progress_{j:05}.png'
-                TF.to_pil_image(image.add(1).div(2).clamp(0, 1)).save(filename)
+                #filename = f'progress_{j:05}.png'
+                img = TF.to_pil_image(image.add(1).div(2).clamp(0, 1))
+                all_frames.append(img)
                 tqdm.write(f'Step {i}, output {j}:')
                 #display.display(display.Image(filename))
-    return 'progress_00000.png'
+    writer = imageio.get_writer('video.mp4', fps=20)
+    for im in all_frames:
+        writer.append_data(np.array(im))
+    writer.close()
+    return img, 'video.mp4'
 
 title = "CLIP Guided Diffusion"
 description = "Gradio demo for CLIP Guided Diffusion. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
 article = "<p style='text-align: center'>By Katherine Crowson (https://github.com/crowsonkb, https://twitter.com/RiversHaveWings). It uses OpenAI's 256x256 unconditional ImageNet diffusion model (https://github.com/openai/guided-diffusion) together with CLIP (https://github.com/openai/CLIP) to connect text prompts with images. | <a href='https://colab.research.google.com/drive/1ED6_MYVXTApBHzQObUPaaMolgf9hZOOF' target='_blank'>Colab</a></p>"
 
-iface = gr.Interface(inference, inputs="text", outputs="image", title=title, description=description, article=article, examples=[["coral reef city by artistation artists"]],
+iface = gr.Interface(inference, inputs="text", outputs=["image","video"], title=title, description=description, article=article, examples=[["coral reef city by artistation artists"]],
                      enable_queue=True)
 iface.launch()
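The substance of the commit is the last hunk: instead of saving a single progress PNG, each sampling step's pred_xstart is converted to a PIL image, collected in all_frames, and encoded to video.mp4 with imageio after sampling finishes. The guard also changes from i % 100 == 0 to i % 1 == 0, so a frame is appended at every diffusion step rather than only at the end. A minimal sketch of that frame-to-video path in isolation follows; the frames_to_mp4 helper and the fake_steps tensors are illustrative only, and it assumes imageio's ffmpeg backend (imageio-ffmpeg) is available in the Space's environment.

import numpy as np
import imageio
import torch
import torchvision.transforms.functional as TF

def frames_to_mp4(frames, path='video.mp4', fps=20):
    """Encode a list of PIL images as an mp4, mirroring the writer calls in the diff."""
    writer = imageio.get_writer(path, fps=fps)  # mp4 writing goes through imageio's ffmpeg plugin
    for im in frames:
        writer.append_data(np.array(im))        # PIL image -> HxWx3 uint8 array
    writer.close()
    return path

# Toy stand-in for sample['pred_xstart']: tensors in [-1, 1], as the diffusion model emits.
fake_steps = [torch.rand(3, 256, 256) * 2 - 1 for _ in range(5)]
all_frames = [TF.to_pil_image(t.add(1).div(2).clamp(0, 1)) for t in fake_steps]
frames_to_mp4(all_frames)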
 
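With outputs=["image","video"], Gradio maps the two values returned by inference() to the output components in order: the last decoded frame goes to the image component and the 'video.mp4' path to the video component. A self-contained sketch of the same wiring, with a placeholder function standing in for the real sampler (the black image and the demo.mp4 path are dummies, not part of the commit):

import gradio as gr
from PIL import Image

def fake_inference(text):
    # Stand-in for the real sampler: return (final frame, video filepath),
    # matching the (img, 'video.mp4') pair returned by inference() above.
    img = Image.new('RGB', (256, 256), 'black')
    return img, 'demo.mp4'  # assumes demo.mp4 exists on disk

demo = gr.Interface(fake_inference,
                    inputs="text",
                    outputs=["image", "video"],  # return values map to these components, in order
                    title="CLIP Guided Diffusion (sketch)")
demo.launch()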