Ahsen Khaliq committed on
Commit
d30b2a2
1 Parent(s): 390d462

Update app.py

Files changed (1): app.py +4 -4
app.py CHANGED
@@ -114,7 +114,7 @@ normalize = transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
                                  std=[0.26862954, 0.26130258, 0.27577711])
 lpips_model = lpips.LPIPS(net='vgg').to(device)
 
-def inference(text):
+def inference(text, image, skip_timesteps):
     all_frames = []
     prompts = [text]
     image_prompts = []
@@ -124,8 +124,8 @@ def inference(text):
     range_scale = 50 # Controls how far out of range RGB values are allowed to be.
     cutn = 16
     n_batches = 1
-    init_image = None # This can be an URL or Colab local path and must be in quotes.
-    skip_timesteps = 0 # This needs to be between approx. 200 and 500 when using an init image.
+    init_image = image.name # This can be an URL or Colab local path and must be in quotes.
+    skip_timesteps = skip_timesteps # This needs to be between approx. 200 and 500 when using an init image.
     # Higher values make the output look more like the init.
     init_scale = 0 # This enhances the effect of the init image, a good value is 1000.
     seed = 0
@@ -214,6 +214,6 @@ def inference(text)
 title = "CLIP Guided Diffusion HQ"
 description = "Gradio demo for CLIP Guided Diffusion. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
 article = "<p style='text-align: center'> By Katherine Crowson (https://github.com/crowsonkb, https://twitter.com/RiversHaveWings). It uses OpenAI's 256x256 unconditional ImageNet diffusion model (https://github.com/openai/guided-diffusion) together with CLIP (https://github.com/openai/CLIP) to connect text prompts with images. | <a href='https://colab.research.google.com/drive/12a_Wrfi2_gwwAuN3VvMTwVMz9TfqctNj' target='_blank'>Colab</a></p>"
-iface = gr.Interface(inference, inputs="text", outputs=["image","video"], title=title, description=description, article=article, examples=[["coral reef city by artistation artists"]],
+iface = gr.Interface(inference, inputs=["text",gr.inputs.Image(type="file", label='initial image (optional)', optional=True),gr.inputs.Slider(minimum=0, maximum=500, step=1, default=0, label="skip_timesteps")], outputs=["image","video"], title=title, description=description, article=article, examples=[["coral reef city by artistation artists"]],
 enable_queue=True)
 iface.launch()
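
Note on the change: the new gr.inputs.Image(type="file", optional=True) input passes a temp-file object whose .name attribute is a path on disk, or None when the user submits text only; the committed code reads image.name unconditionally. Below is a minimal sketch, not part of this commit, of how the two new inputs could be resolved defensively before the sampling loop. The resolve_init helper and the text-only fallback to skip_timesteps = 0 are assumptions for illustration, not the Space's actual code.

# Sketch only: defensive resolution of the optional init image and the
# skip_timesteps slider value. resolve_init is a hypothetical helper, not in app.py.
def resolve_init(image, skip_timesteps):
    if image is None:
        # Text-only run: no init image, so the slider value is ignored (assumption).
        return None, 0
    # gr.inputs.Image(type="file") hands over a NamedTemporaryFile-like object;
    # .name is its path on disk, matching init_image = image.name in the commit.
    return image.name, int(skip_timesteps)

# Example: text-only call vs. call with an uploaded file.
print(resolve_init(None, 250))             # -> (None, 0)

class FakeUpload:                          # stand-in for Gradio's temp-file object
    name = "/tmp/init.png"

print(resolve_init(FakeUpload(), 250))     # -> ('/tmp/init.png', 250)

As the in-code comment notes, the approx. 200-500 range for skip_timesteps only applies when an init image is supplied, which is why the new slider defaults to 0. The gr.inputs.* constructors, optional=True, and enable_queue=True used in the diff belong to the pre-3.0 Gradio API this Space was built against.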