Update app.py
Browse files
app.py
CHANGED
@@ -26,8 +26,10 @@ def ask(input_im, scale, steps, seed, images):
|
|
26 |
def main(input_im, scale, steps, seed):
|
27 |
|
28 |
images = []
|
29 |
-
|
30 |
-
|
|
|
|
|
31 |
|
32 |
return images
|
33 |
|
@@ -49,20 +51,18 @@ output.style(grid=2, height="")
|
|
49 |
|
50 |
description = \
|
51 |
"""
|
52 |
-
<p style='text-align: center;'>This demo is running on CPU. Working version fixed by Sylvain <a href='https://twitter.com/fffiloni' target='_blank'>@fffiloni</a>. You'll get 4 image variations. NSFW filters enabled
|
53 |
Generate variations on an input image using a fine-tuned version of Stable Diffusion.<br />
|
54 |
Trained by <a href='https://www.justinpinkney.com' target='_blank'>Justin Pinkney</a> (<a href='https://twitter.com/Buntworthy' target='_blank'>@Buntworthy</a>) at <a href='https://lambdalabs.com/' target='_blank'>Lambda</a><br />
|
55 |
This version has been ported to 🤗 Diffusers library, see more details on how to use this version in the <a href='https://github.com/LambdaLabsML/lambda-diffusers' target='_blank'>Lambda Diffusers repo</a>.<br />
|
56 |
-
|
57 |
For the original training code see <a href='https://github.com/justinpinkney/stable-diffusion' target='_blank'>this repo</a>.
|
58 |
-
<img src='https://raw.githubusercontent.com/justinpinkney/stable-diffusion/main/assets/im-vars-thin.jpg'/>
|
59 |
-
|
60 |
</p>
|
61 |
"""
|
62 |
|
63 |
article = \
|
64 |
"""
|
65 |
-
|
66 |
## How does this work?
|
67 |
The normal Stable Diffusion model is trained to be conditioned on text input. This version has had the original text encoder (from CLIP) removed, and replaced with
|
68 |
the CLIP _image_ encoder instead. So instead of generating images based on a text input, images are generated to match CLIP's embedding of the image.
|
|
|
26 |
def main(input_im, scale, steps, seed):
|
27 |
|
28 |
images = []
|
29 |
+
for i in range(2):
|
30 |
+
images = ask(input_im, scale, n_samples, steps, seed, images)
|
31 |
+
#images = ask(input_im, scale, n_samples, steps, seed, images)
|
32 |
+
#images = ask(input_im, scale, n_samples, steps, seed, images)
|
33 |
|
34 |
return images
|
35 |
|
|
|
51 |
|
52 |
description = \
|
53 |
"""
|
54 |
+
<p style='text-align: center;'>This demo is running on CPU. Working version fixed by Sylvain <a href='https://twitter.com/fffiloni' target='_blank'>@fffiloni</a>. You'll get 4 image variations. NSFW filters enabled. <img id='visitor-badge' alt='visitor badge' src='https://visitor-badge.glitch.me/badge?page_id=gradio-blocks.sd-img-variations' style='display: inline-block' /><br />
|
55 |
Generate variations on an input image using a fine-tuned version of Stable Diffusion.<br />
|
56 |
Trained by <a href='https://www.justinpinkney.com' target='_blank'>Justin Pinkney</a> (<a href='https://twitter.com/Buntworthy' target='_blank'>@Buntworthy</a>) at <a href='https://lambdalabs.com/' target='_blank'>Lambda</a><br />
|
57 |
This version has been ported to 🤗 Diffusers library, see more details on how to use this version in the <a href='https://github.com/LambdaLabsML/lambda-diffusers' target='_blank'>Lambda Diffusers repo</a>.<br />
|
|
|
58 |
For the original training code see <a href='https://github.com/justinpinkney/stable-diffusion' target='_blank'>this repo</a>.
|
59 |
+
<img src='https://raw.githubusercontent.com/justinpinkney/stable-diffusion/main/assets/im-vars-thin.jpg' />
|
|
|
60 |
</p>
|
61 |
"""
|
62 |
|
63 |
article = \
|
64 |
"""
|
65 |
+
|
66 |
## How does this work?
|
67 |
The normal Stable Diffusion model is trained to be conditioned on text input. This version has had the original text encoder (from CLIP) removed, and replaced with
|
68 |
the CLIP _image_ encoder instead. So instead of generating images based on a text input, images are generated to match CLIP's embedding of the image.
|