DmitrMakeev committed
Commit 15d6587
Parent: ffed24a

Create app_base.py

Files changed (1)
  1. app_base.py +276 -0
app_base.py ADDED
@@ -0,0 +1,276 @@
#!/usr/bin/env python

import os

import gradio as gr
import PIL.Image
from diffusers.utils import load_image

from model import ADAPTER_NAMES, Model
from utils import (
    DEFAULT_STYLE_NAME,
    MAX_SEED,
    STYLE_NAMES,
    apply_style,
    randomize_seed_fn,
)

# Cache example outputs to disk only when explicitly enabled via the environment.
CACHE_EXAMPLES = os.environ.get("CACHE_EXAMPLES") == "1"


def create_demo(model: Model) -> gr.Blocks:
    def run(
        image: PIL.Image.Image,
        prompt: str,
        negative_prompt: str,
        adapter_name: str,
        style_name: str = DEFAULT_STYLE_NAME,
        num_inference_steps: int = 30,
        guidance_scale: float = 5.0,
        adapter_conditioning_scale: float = 1.0,
        adapter_conditioning_factor: float = 1.0,
        seed: int = 0,
        apply_preprocess: bool = True,
        progress=gr.Progress(track_tqdm=True),
    ) -> list[PIL.Image.Image]:
        # Expand the selected style template into the positive and negative prompts.
        prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)

        return model.run(
            image=image,
            prompt=prompt,
            negative_prompt=negative_prompt,
            adapter_name=adapter_name,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            adapter_conditioning_scale=adapter_conditioning_scale,
            adapter_conditioning_factor=adapter_conditioning_factor,
            seed=seed,
            apply_preprocess=apply_preprocess,
        )

    def process_example(
        image_url: str,
        prompt: str,
        adapter_name: str,
        guidance_scale: float,
        adapter_conditioning_scale: float,
        seed: int,
        apply_preprocess: bool,
    ) -> list[PIL.Image.Image]:
        image = load_image(image_url)
        return run(
            image=image,
            prompt=prompt,
            negative_prompt="extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured",
            adapter_name=adapter_name,
            style_name="(No style)",
            guidance_scale=guidance_scale,
            adapter_conditioning_scale=adapter_conditioning_scale,
            seed=seed,
            apply_preprocess=apply_preprocess,
        )

    # Each row matches the `inputs` list passed to gr.Examples below:
    # [image, prompt, adapter_name, guidance_scale, adapter_conditioning_scale, seed, apply_preprocess]
    examples = [
        [
            "assets/org_canny.jpg",
            "Mystical fairy in real, magic, 4k picture, high quality",
            "canny",
            7.5,
            0.75,
            42,
            True,
        ],
        [
            "assets/org_sketch.png",
            "a robot, mount fuji in the background, 4k photo, highly detailed",
            "sketch",
            7.5,
            1.0,
            42,
            True,
        ],
        [
            "assets/org_lin.jpg",
            "Ice dragon roar, 4k photo",
            "lineart",
            7.5,
            0.8,
            42,
            True,
        ],
        [
            "assets/org_mid.jpg",
            "A photo of a room, 4k photo, highly detailed",
            "depth-midas",
            7.5,
            1.0,
            42,
            True,
        ],
        [
            "assets/org_zoe.jpg",
            "A photo of an orchid, 4k photo, highly detailed",
            "depth-zoe",
            5.0,
            1.0,
            42,
            True,
        ],
        [
            "assets/people.jpg",
            "A couple, 4k photo, highly detailed",
            "openpose",
            5.0,
            1.0,
            42,
            True,
        ],
        # The last two rows pass in already-processed control images,
        # so apply_preprocess is False.
        [
            "assets/depth-midas-image.png",
            "stormtrooper lecture, 4k photo, highly detailed",
            "depth-midas",
            7.5,
            1.0,
            42,
            False,
        ],
        [
            "assets/openpose-image.png",
            "spiderman, 4k photo, highly detailed",
            "openpose",
            5.0,
            1.0,
            42,
            False,
        ],
    ]

    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                with gr.Group():
                    image = gr.Image(label="Input image", type="pil", height=600)
                    prompt = gr.Textbox(label="Prompt")
                    with gr.Row():
                        adapter_name = gr.Dropdown(label="Adapter name", choices=ADAPTER_NAMES, value=ADAPTER_NAMES[0])
                        style = gr.Dropdown(label="Style", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)
                    run_button = gr.Button("Run")
                with gr.Accordion("Advanced options", open=False):
                    apply_preprocess = gr.Checkbox(label="Apply preprocess", value=True)
                    negative_prompt = gr.Textbox(
                        label="Negative prompt",
                        value="extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured",
                    )
                    num_inference_steps = gr.Slider(
                        label="Number of steps",
                        minimum=1,
                        maximum=Model.MAX_NUM_INFERENCE_STEPS,
                        step=1,
                        value=25,
                    )
                    guidance_scale = gr.Slider(
                        label="Guidance scale",
                        minimum=0.1,
                        maximum=30.0,
                        step=0.1,
                        value=5.0,
                    )
                    adapter_conditioning_scale = gr.Slider(
                        label="Adapter conditioning scale",
                        minimum=0.5,
                        maximum=1.0,
                        step=0.1,
                        value=1.0,
                    )
                    adapter_conditioning_factor = gr.Slider(
                        label="Adapter conditioning factor",
                        info="Fraction of timesteps for which adapter should be applied",
                        minimum=0.5,
                        maximum=1.0,
                        step=0.1,
                        value=1.0,
                    )
                    seed = gr.Slider(
                        label="Seed",
                        minimum=0,
                        maximum=MAX_SEED,
                        step=1,
                        value=42,
                    )
                    randomize_seed = gr.Checkbox(label="Randomize seed", value=False)
            with gr.Column():
                result = gr.Gallery(label="Result", columns=2, height=600, object_fit="scale-down", show_label=False)

        gr.Examples(
            examples=examples,
            inputs=[
                image,
                prompt,
                adapter_name,
                guidance_scale,
                adapter_conditioning_scale,
                seed,
                apply_preprocess,
            ],
            outputs=result,
            fn=process_example,
            cache_examples=CACHE_EXAMPLES,
        )

        inputs = [
            image,
            prompt,
            negative_prompt,
            adapter_name,
            style,
            num_inference_steps,
            guidance_scale,
            adapter_conditioning_scale,
            adapter_conditioning_factor,
            seed,
            apply_preprocess,
        ]
        # Each trigger first resolves the seed (optionally randomizing it),
        # then runs generation with the resolved value.
        prompt.submit(
            fn=randomize_seed_fn,
            inputs=[seed, randomize_seed],
            outputs=seed,
            queue=False,
            api_name=False,
        ).then(
            fn=run,
            inputs=inputs,
            outputs=result,
            api_name=False,
        )
        negative_prompt.submit(
            fn=randomize_seed_fn,
            inputs=[seed, randomize_seed],
            outputs=seed,
            queue=False,
            api_name=False,
        ).then(
            fn=run,
            inputs=inputs,
            outputs=result,
            api_name=False,
        )
        run_button.click(
            fn=randomize_seed_fn,
            inputs=[seed, randomize_seed],
            outputs=seed,
            queue=False,
            api_name=False,
        ).then(
            fn=run,
            inputs=inputs,
            outputs=result,
            api_name="run",  # exposes this generation step as the public /run endpoint
        )

    return demo


if __name__ == "__main__":
    model = Model(ADAPTER_NAMES[0])
    demo = create_demo(model)
    demo.queue(max_size=20).launch()
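
Note that app_base.py only builds the Gradio UI: the actual SDXL + T2I-Adapter pipeline lives in model.py and utils.py, which are not part of this commit. For readers who want to smoke-test the Blocks layout without downloading any weights, below is a minimal stub sketch of the interface this file imports. The names and signatures are inferred from the imports and call sites above, and the adapter list is taken from the example rows; the function bodies, the MAX_NUM_INFERENCE_STEPS cap, and the MAX_SEED value are placeholder assumptions, not the real implementations.

# Hypothetical stand-ins for model.py / utils.py, not the real modules.
# Split this into model.py and utils.py to satisfy the imports in app_base.py.
from __future__ import annotations

import random

import PIL.Image

# Adapter names as they appear in the example rows above.
ADAPTER_NAMES = ["canny", "sketch", "lineart", "depth-midas", "depth-zoe", "openpose"]

DEFAULT_STYLE_NAME = "(No style)"  # matches the style used by process_example
STYLE_NAMES = [DEFAULT_STYLE_NAME]
MAX_SEED = 2**32 - 1  # assumed upper bound; the real constant lives in utils.py


def apply_style(style_name: str, prompt: str, negative_prompt: str) -> tuple[str, str]:
    # The real helper expands a style template into both prompts; this one is a no-op.
    return prompt, negative_prompt


def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    # Replace the seed with a random one when the checkbox is ticked.
    return random.randint(0, MAX_SEED) if randomize_seed else seed


class Model:
    MAX_NUM_INFERENCE_STEPS = 50  # assumed cap for the steps slider

    def __init__(self, adapter_name: str) -> None:
        self.adapter_name = adapter_name

    def run(
        self,
        *,
        image: PIL.Image.Image,
        prompt: str,
        negative_prompt: str,
        adapter_name: str,
        num_inference_steps: int = 30,
        guidance_scale: float = 5.0,
        adapter_conditioning_scale: float = 1.0,
        adapter_conditioning_factor: float = 1.0,
        seed: int = 0,
        apply_preprocess: bool = True,
    ) -> list[PIL.Image.Image]:
        # The real method runs the diffusion pipeline; the stub echoes the
        # input image so the result gallery has something to display.
        return [image, image]

Because the run button's event chain is exported with api_name="run", a launched copy of the app can also be driven programmatically. A hedged sketch with gradio_client (recent versions; older releases accept a plain filepath where handle_file is used), passing arguments in the same order as the `inputs` list:

from gradio_client import Client, handle_file

client = Client("http://127.0.0.1:7860/")  # assumes a local demo.queue().launch()
result = client.predict(
    handle_file("assets/org_canny.jpg"),  # input image
    "Mystical fairy in real, magic, 4k picture, high quality",  # prompt
    "low quality",  # negative prompt
    "canny",  # adapter name
    "(No style)",  # style
    25,  # number of steps
    5.0,  # guidance scale
    1.0,  # adapter conditioning scale
    1.0,  # adapter conditioning factor
    42,  # seed
    True,  # apply preprocess
    api_name="/run",
)

The return value mirrors the gallery output, typically a list of generated images saved to disk.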