Update app.py
app.py
CHANGED
@@ -8,6 +8,96 @@ from torchvision import transforms as tfms
 from transformers import CLIPTextModel, CLIPTokenizer, logging
 from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel, DiffusionPipeline
 
+HTML_TEMPLATE = """
+<style>
+    body {
+        background: linear-gradient(135deg, #f5f7fa, #c3cfe2);
+    }
+    #app-header {
+        text-align: center;
+        background: rgba(255, 255, 255, 0.8);
+        padding: 20px;
+        border-radius: 10px;
+        box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
+        position: relative;
+    }
+    #app-header h1 {
+        color: #4CAF50;
+        font-size: 2em;
+        margin-bottom: 10px;
+    }
+    .concept {
+        position: relative;
+        transition: transform 0.3s;
+    }
+    .concept:hover {
+        transform: scale(1.1);
+    }
+    .concept img {
+        width: 100px;
+        border-radius: 10px;
+        box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
+    }
+    .concept-description {
+        position: absolute;
+        bottom: -30px;
+        left: 50%;
+        transform: translateX(-50%);
+        background-color: #4CAF50;
+        color: white;
+        padding: 5px 10px;
+        border-radius: 5px;
+        opacity: 0;
+        transition: opacity 0.3s;
+    }
+    .concept:hover .concept-description {
+        opacity: 1;
+    }
+    .artifact {
+        position: absolute;
+        background: rgba(76, 175, 80, 0.1);
+        border-radius: 50%;
+    }
+    .artifact.large {
+        width: 300px;
+        height: 300px;
+        top: -50px;
+        left: -150px;
+    }
+    .artifact.medium {
+        width: 200px;
+        height: 200px;
+        bottom: -50px;
+        right: -100px;
+    }
+    .artifact.small {
+        width: 100px;
+        height: 100px;
+        top: 50%;
+        left: 50%;
+        transform: translate(-50%, -50%);
+    }
+</style>
+<div id="app-header">
+    <div class="artifact large"></div>
+    <div class="artifact medium"></div>
+    <div class="artifact small"></div>
+    <h1>Generative Art with Textual Inversion and Guidance</h1>
+    <p>Generate unique art using different styles and guidance methods.</p>
+    <div style="display: flex; justify-content: center; gap: 20px; margin-top: 20px;">
+        <div class="concept">
+            <img src="https://example.com/illustration-style.jpg" alt="Illustration Style">
+            <div class="concept-description">Illustration Style</div>
+        </div>
+        <div class="concept">
+            <img src="https://example.com/line-art.jpg" alt="Line Art">
+            <div class="concept-description">Line Art</div>
+        </div>
+        <!-- Add more concepts here for each style in your style_token_dict -->
+    </div>
+</div>
+"""
+
 torch_device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
 if "mps" == torch_device: os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = "1"
 
@@ -104,19 +194,41 @@ examples = [
     ["A majestic castle on a floating island", 'Illustration Style', 10, 7.5, 42, 'Grayscale', 200]
 ]
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+title = "Generative Art with Textual Inversion and Guidance"
+description = "Create unique artworks using Stable Diffusion with various styles and guidance methods."
+
+with gr.Blocks(css=HTML_TEMPLATE) as demo:
+    gr.HTML(HTML_TEMPLATE)  # This adds the styled header to your app
+    with gr.Row():
+        text = gr.Textbox(label="Prompt", placeholder="Enter your creative prompt here...")
+        style = gr.Dropdown(label="Style", choices=list(style_token_dict.keys()), value="Illustration Style")
+    with gr.Row():
+        inference_step = gr.Slider(1, 50, 10, step=1, label="Inference steps")
+        guidance_scale = gr.Slider(1, 10, 7.5, step=0.1, label="Guidance scale")
+        seed = gr.Slider(0, 10000, 42, step=1, label="Seed")
+    with gr.Row():
+        guidance_method = gr.Dropdown(label="Guidance method", choices=['Grayscale', 'Bright', 'Contrast', 'Symmetry', 'Saturation'], value="Grayscale")
+        loss_scale = gr.Slider(100, 10000, 200, step=100, label="Loss scale")
+    with gr.Row():
+        generate_button = gr.Button("Generate Art")
+    with gr.Row():
+        output_image = gr.Image(width=512, height=512, label="Generated art")
+        output_image_guided = gr.Image(width=512, height=512, label="Generated art with guidance")
+
+    generate_button.click(
+        inference,
+        inputs=[text, style, inference_step, guidance_scale, seed, guidance_method, loss_scale],
+        outputs=[output_image, output_image_guided]
+    )
+
+    gr.Examples(
+        examples=[
+            ["A majestic castle on a floating island", 'Illustration Style', 10, 7.5, 42, 'Grayscale', 200]
+        ],
+        inputs=[text, style, inference_step, guidance_scale, seed, guidance_method, loss_scale],
+        outputs=[output_image, output_image_guided],
+        fn=inference,
+        cache_examples=True,
+    )
 
 demo.launch()
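Note: the new Blocks UI references two names that are not part of this diff and are presumably defined earlier in app.py — style_token_dict (mapping style names to learned textual-inversion tokens) and inference (the function wired to the Generate Art button with seven inputs and two image outputs). A minimal sketch of the shapes this wiring assumes follows; the dictionary values and the function body are illustrative placeholders, not the actual contents of the Space.

from PIL import Image

# Assumed shape only: style name -> textual-inversion token (values are hypothetical).
style_token_dict = {
    "Illustration Style": "<illustration-style>",
    "Line Art": "<line-art>",
}

def inference(prompt, style, inference_step, guidance_scale, seed, guidance_method, loss_scale):
    """Stub with the signature the Blocks wiring expects: 7 inputs, 2 image outputs."""
    # The real app presumably runs Stable Diffusion twice: once with the style token
    # applied to the prompt, and once more with the selected loss-based guidance.
    placeholder = Image.new("RGB", (512, 512))
    return placeholder, placeholder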