Spaces:
Runtime error
Runtime error
Ahsen Khaliq
committed on
Commit
•
f884994
1
Parent(s):
a715393
Update app.py
Browse files
app.py
CHANGED
@@ -177,10 +177,10 @@ def inference(text,steps,image,mode, seed):
|
|
177 |
title = "StyleGAN3+CLIP"
|
178 |
description = "Gradio demo for StyleGAN3+CLIP: Generates images (mostly faces) using StyleGAN3 with CLIP guidance. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
|
179 |
article = "<p style='text-align: center'><a href='https://colab.research.google.com/drive/1eYlenR1GHPZXt-YuvXabzO9wfh9CWY36' target='_blank'>Colab</a> Written by nshepperd (https://twitter.com/nshepperd1, https://github.com/nshepperd). Thanks to Katherine Crowson (https://twitter.com/RiversHaveWings, https://github.com/crowsonkb) for coming up with many improved sampling tricks, as well as some of the code | <a href='https://github.com/NVlabs/stylegan3' target='_blank'>StyleGAN3 Gihub</a> | <a href='https://github.com/openai/CLIP' target='_blank'>CLIP Github</a></p>"
|
180 |
-
examples = [['mario',150,None,"CLIP+StyleGAN3"
|
181 |
gr.Interface(
|
182 |
inference,
|
183 |
-
["text",gr.inputs.Slider(minimum=50, maximum=200, step=1, default=150, label="steps"),gr.inputs.Image(type="pil", label="Image (Optional)", optional=True),gr.inputs.Radio(["CLIP+StyleGAN3","Stylegan3"], type="value", default="CLIP+StyleGAN3", label="mode"),gr.inputs.Slider(minimum=5, maximum=
|
184 |
[gr.outputs.Image(type="pil", label="Output"),"playable_video"],
|
185 |
title=title,
|
186 |
description=description,
|
|
|
177 |
title = "StyleGAN3+CLIP"
|
178 |
description = "Gradio demo for StyleGAN3+CLIP: Generates images (mostly faces) using StyleGAN3 with CLIP guidance. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
|
179 |
article = "<p style='text-align: center'><a href='https://colab.research.google.com/drive/1eYlenR1GHPZXt-YuvXabzO9wfh9CWY36' target='_blank'>Colab</a> Written by nshepperd (https://twitter.com/nshepperd1, https://github.com/nshepperd). Thanks to Katherine Crowson (https://twitter.com/RiversHaveWings, https://github.com/crowsonkb) for coming up with many improved sampling tricks, as well as some of the code | <a href='https://github.com/NVlabs/stylegan3' target='_blank'>StyleGAN3 Gihub</a> | <a href='https://github.com/openai/CLIP' target='_blank'>CLIP Github</a></p>"
|
180 |
+
examples = [['mario',150,None,"CLIP+StyleGAN3"],['',150,None,"Stylegan3",10]]
|
181 |
gr.Interface(
|
182 |
inference,
|
183 |
+
["text",gr.inputs.Slider(minimum=50, maximum=200, step=1, default=150, label="steps"),gr.inputs.Image(type="pil", label="Image (Optional)", optional=True),gr.inputs.Radio(["CLIP+StyleGAN3","Stylegan3"], type="value", default="CLIP+StyleGAN3", label="mode"),gr.inputs.Slider(minimum=5, maximum=10, step=1, default=5, label="seed (for stylegan3)")],
|
184 |
[gr.outputs.Image(type="pil", label="Output"),"playable_video"],
|
185 |
title=title,
|
186 |
description=description,
|