Ahsen Khaliq committed
Commit 075cc29
1 Parent(s): a0804a0

Update app.py

Files changed (1):
  1. app.py +8 -2
app.py CHANGED
@@ -31,7 +31,7 @@ model_config.update({
     'class_cond': False,
     'diffusion_steps': 1000,
     'rescale_timesteps': False,
-    'timestep_respacing': '200',
+    'timestep_respacing': '300',
     'image_size': 256,
     'learn_sigma': True,
     'noise_schedule': 'linear',
@@ -111,5 +111,11 @@ def inference(text):
     tqdm.write(f'Step {i}, output {j}:')
     #display.display(display.Image(filename))
     return 'progress_00000.png'
-iface = gr.Interface(inference, inputs="text", outputs="image")
+
+title = "CLIP guided Diffusion"
+description = "Gradio demo for CLIP guided Diffusion. To use it, simply upload your text, or click one of the examples to load them. Read more at the links below."
+article = "<p style='text-align: center'>By Katherine Crowson (https://github.com/crowsonkb, https://twitter.com/RiversHaveWings). It uses OpenAI's 256x256 unconditional ImageNet diffusion model (https://github.com/openai/guided-diffusion) together with CLIP (https://github.com/openai/CLIP) to connect text prompts with images. | <a href='https://colab.research.google.com/drive/1ED6_MYVXTApBHzQObUPaaMolgf9hZOOF' target='_blank'>Colab</a></p>"
+
+iface = gr.Interface(inference, inputs="text", outputs="image", title=title, description=description, article=article, examples=[["coral reef city by artistation artists"]],
+    enable_queue=True)
 iface.launch()
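
The functional change in the first hunk is the sampling schedule: 'timestep_respacing' tells guided-diffusion to respace the 1000-step training schedule down to a shorter sampling schedule, so '300' denoises each image over 300 steps instead of 200 (slower, but usually higher quality). A minimal sketch of how this config is consumed, assuming the standard openai/guided-diffusion script_util helpers that the model_config.update(...) hunk header suggests app.py uses:

# Sketch only, not the full app.py; assumes openai/guided-diffusion is installed.
from guided_diffusion.script_util import (
    model_and_diffusion_defaults,
    create_model_and_diffusion,
)

model_config = model_and_diffusion_defaults()
model_config.update({
    'class_cond': False,
    'diffusion_steps': 1000,       # schedule length the checkpoint was trained with
    'rescale_timesteps': False,
    'timestep_respacing': '300',   # sample on 300 respaced steps instead of all 1000
    'image_size': 256,
    'learn_sigma': True,
    'noise_schedule': 'linear',
})

# Returns the UNet and a diffusion object whose sampling loop runs one
# iteration per respaced step, so '300' costs ~1.5x the old '200' per image.
model, diffusion = create_model_and_diffusion(**model_config)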
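The second hunk fleshes out the Gradio UI. Below is a self-contained sketch of the new call; the stub inference function is hypothetical (standing in for the real CLIP-guided sampler), while the Interface arguments are the ones added in the diff. Note that enable_queue was accepted directly by gr.Interface in the Gradio 2.x releases current at the time; later Gradio versions moved queueing to iface.queue().

import gradio as gr
from PIL import Image

def inference(text):
    # Hypothetical stand-in for the real sampler, which periodically saves
    # its latest sample to progress_00000.png and returns that path.
    Image.new('RGB', (256, 256)).save('progress_00000.png')
    return 'progress_00000.png'

title = "CLIP guided Diffusion"
description = ("Gradio demo for CLIP guided Diffusion. To use it, simply "
               "upload your text, or click one of the examples to load them.")
article = "<p style='text-align: center'>By Katherine Crowson ...</p>"  # full HTML as in the diff

iface = gr.Interface(
    inference,
    inputs="text",
    outputs="image",
    title=title,
    description=description,
    article=article,
    examples=[["coral reef city by artistation artists"]],
    enable_queue=True,  # queue long-running jobs so requests don't time out
)
iface.launch()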