Commit "layout"

app.py CHANGED
@@ -236,7 +236,7 @@ def create_image(
         image=canny_map,
         controlnet_conditioning_scale=float(control_scale),
     )
-    return images
+    return images[0]
 
 
 def pil_to_cv2(image_pil):
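Note on this hunk: a diffusers pipeline call returns its outputs as a list under `.images`, so returning `images` hands the whole list to the output component. Since the output is changed to a single `gr.Image` in a later hunk, the handler has to unwrap the first element. A minimal sketch of the distinction, with `pipe` standing in for the Space's actual ControlNet pipeline:

```python
def create_image_sketch(pipe, prompt):
    # `pipe` is a stand-in for the Space's ControlNet pipeline; diffusers
    # pipelines return their outputs as a list under `.images`.
    images = pipe(prompt=prompt).images  # list of PIL.Image
    # A gr.Gallery output accepts the whole list; gr.Image needs one image.
    return images[0]
```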
@@ -273,7 +273,7 @@ If our work is helpful for your research or applications, please cite us via:
 If you have any questions, please feel free to open an issue or directly reach us out at <b>haofanwang.ai@gmail.com</b>.
 """
 
-block = gr.Blocks(
+block = gr.Blocks()
 with block:
     # description
     gr.Markdown(title)
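For context (illustrative skeleton, not the Space's code): `gr.Blocks()` is Gradio's low-level layout API, and components created inside the `with block:` context are added to the page top-to-bottom.

```python
import gradio as gr

# Illustrative gr.Blocks skeleton: components instantiated inside the
# context manager are registered to the page in order.
block = gr.Blocks()
with block:
    gr.Markdown("# Title")
    name = gr.Textbox(label="Name")
    greeting = gr.Textbox(label="Greeting")
    name.submit(lambda n: f"Hello, {n}!", inputs=name, outputs=greeting)
# block.launch()  # start the local server
```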
@@ -285,27 +285,26 @@ with block:
     with gr.Row():
         with gr.Column():
             image_pil = gr.Image(label="Style Image", type="pil")
-
-            target = gr.Radio(
-                [
-                    "Load only style blocks",
-                    "Load style+layout block",
-                    "Load original IP-Adapter",
-                ],
-                value="Load only style blocks",
-                label="Style mode",
-            )
-
-            prompt = gr.Textbox(
-                label="Prompt",
-                value="a cat, masterpiece, best quality, high quality",
-            )
-
-            scale = gr.Slider(
-                minimum=0, maximum=2.0, step=0.01, value=1.0, label="Scale"
-            )
+        with gr.Column():
+            prompt = gr.Textbox(
+                label="Prompt",
+                value="a cat, masterpiece, best quality, high quality",
+            )
+
+            scale = gr.Slider(
+                minimum=0, maximum=2.0, step=0.01, value=1.0, label="Scale"
+            )
 
             with gr.Accordion(open=False, label="Advanced Options"):
+                target = gr.Radio(
+                    [
+                        "Load only style blocks",
+                        "Load style+layout block",
+                        "Load original IP-Adapter",
+                    ],
+                    value="Load only style blocks",
+                    label="Style mode",
+                )
                 with gr.Column():
                     src_image_pil = gr.Image(
                         label="Source Image (optional)", type="pil"
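What this hunk does in Gradio terms: the prompt and scale controls move out of the style-image column into their own `gr.Column()`, and the style-mode `gr.Radio` moves under the Advanced Options accordion. Indentation is lost in this extracted view, so the nesting below is a plausible reconstruction, not a copy of the file:

```python
import gradio as gr

# Assumed nesting after the commit (sketch): one row, style image on the
# left, prompt/scale plus the advanced accordion in a second column.
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():  # left: style reference image
            image_pil = gr.Image(label="Style Image", type="pil")
        with gr.Column():  # right: main controls
            prompt = gr.Textbox(
                label="Prompt",
                value="a cat, masterpiece, best quality, high quality",
            )
            scale = gr.Slider(
                minimum=0, maximum=2.0, step=0.01, value=1.0, label="Scale"
            )
            with gr.Accordion(open=False, label="Advanced Options"):
                target = gr.Radio(
                    [
                        "Load only style blocks",
                        "Load style+layout block",
                        "Load original IP-Adapter",
                    ],
                    value="Load only style blocks",
                    label="Style mode",
                )
```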
@@ -335,10 +334,10 @@ with block:
                     )
 
                     guidance_scale = gr.Slider(
-                        minimum=
-                        maximum=
+                        minimum=0,
+                        maximum=10.0,
                         step=0.01,
-                        value=
+                        value=0.0,
                         label="guidance scale",
                     )
                     num_inference_steps = gr.Slider(
@@ -359,26 +358,40 @@ with block:
             generate_button = gr.Button("Generate Image")
 
         with gr.Column():
-            generated_image = gr.
-            [original removed lines 363-381 are not recoverable from this view]
+            generated_image = gr.Image(label="Generated Image")
+
+    inputs = [
+        image_pil,
+        src_image_pil,
+        prompt,
+        n_prompt,
+        scale,
+        control_scale,
+        guidance_scale,
+        num_inference_steps,
+        seed,
+        target,
+        neg_content_prompt,
+        neg_content_scale,
+    ]
+    outputs = [generated_image]
+
+    gr.on(
+        triggers=[
+            prompt.input,
+            generate_button.click,
+            guidance_scale.input,
+            scale.input,
+            control_scale.input,
+            seed.input,
+        ],
+        fn=create_image,
+        inputs=inputs,
+        outputs=outputs,
+        show_progress="minimal",
+        show_api=False,
+        trigger_mode="always_last",
+    )
 
     gr.Examples(
         examples=examples,
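The added `gr.on(...)` call binds one handler to several events at once: `create_image` runs on the button click and also live whenever the prompt or any of the listed sliders changes. `trigger_mode="always_last"` coalesces rapid-fire events so only the most recent one runs after the current job finishes. A self-contained sketch of the same pattern (handler and component names here are illustrative, not from the Space):

```python
import gradio as gr

def render(text, scale):
    # Stand-in for create_image: any function wired through gr.on.
    return f"{text} (scale={scale})"

with gr.Blocks() as demo:
    text = gr.Textbox(label="Prompt")
    scale = gr.Slider(0, 2.0, step=0.01, value=1.0, label="Scale")
    button = gr.Button("Generate")
    out = gr.Textbox(label="Result")

    gr.on(
        triggers=[text.input, scale.input, button.click],  # any of these fires it
        fn=render,
        inputs=[text, scale],
        outputs=[out],
        show_progress="minimal",
        show_api=False,
        trigger_mode="always_last",  # drop stale events; run only the latest
    )

# demo.launch()
```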
@@ -390,4 +403,5 @@ with block:
 
     gr.Markdown(article)
 
-block.
+block.queue(api_open=False)
+block.launch(show_api=False)
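The closing `+` lines enable Gradio's request queue and close off the programmatic API surface: `queue(api_open=False)` runs heavy GPU jobs through a queue while keeping the queue's API endpoints shut, and `launch(show_api=False)` hides the "Use via API" footer. Roughly:

```python
# Same calls as the commit, plus an optional queue cap for illustration
# (max_size is an assumption, not part of this commit).
block.queue(max_size=20, api_open=False)  # serialize jobs; keep queue API closed
block.launch(show_api=False)              # hide the "Use via API" footer
```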
|