Update app.py

app.py CHANGED
@@ -76,7 +76,7 @@ def add_overlay(base_img, overlay_img, margin=20):
 
 
 @spaces.GPU
-def infer(input_image, input_image_upload, overlay_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5, steps=28, progress=gr.Progress(track_tqdm=True)):
+def infer(input_image, input_image_upload, overlay_image, prompt="make it real", seed=42, randomize_seed=False, guidance_scale=2.5, steps=28, progress=gr.Progress(track_tqdm=True)):
     """
     Perform image editing using the FLUX.1 Kontext pipeline.
 
@@ -147,7 +147,7 @@ def infer(input_image, input_image_upload, overlay_image, prompt, seed=42, rando
         generator=torch.Generator().manual_seed(seed),
     ).images[0]
 
-    return image, processed_input_image, seed, gr.Button(visible=True)
+    return image, processed_input_image, seed, gr.Button(visible=False)
 
 @spaces.GPU
 def infer_example(input_image, prompt):
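The `gr.Button(visible=...)` value returned above relies on a Gradio 4+ convention: an event handler can return a component instance for one of its outputs, and Gradio applies that instance's properties (such as `visible`) to the live component. A minimal sketch of the pattern, with illustrative component names rather than the Space's own:

import gradio as gr

def run(text):
    # Returning a component instance as an output value updates the
    # rendered component's properties; here the button is shown only
    # when there is a result to reuse.
    result = text.upper()
    return result, gr.Button(visible=bool(result))

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    out = gr.Textbox(label="Output")
    reuse = gr.Button("Reuse", visible=False)
    inp.submit(run, inputs=[inp], outputs=[out, reuse])

demo.launch()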
@@ -166,12 +166,12 @@ css=""
 with gr.Blocks(css=css) as demo:
 
     with gr.Column(elem_id="col-container"):
-        gr.Markdown(f"""# FLUX.1 Kontext [dev]
+        gr.Markdown(f"""# FLUX.1 Kontext [dev] + Draw2Photo LoRA
 Image editing and manipulation model guidance-distilled from FLUX.1 Kontext [pro], [[blog]](https://bfl.ai/announcements/flux-1-kontext-dev) [[model]](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev)
 """)
         with gr.Row():
             with gr.Column():
-                gr.Markdown("Select/Upload/Draw a person ⬇️")
+                gr.Markdown("Step 1. Select/Upload/Draw a person ⬇️")
                 # input_image = gr.Image(label="Upload drawing", type="pil")
                 with gr.Row():
                     with gr.Tabs() as tabs:
@@ -191,7 +191,7 @@ Image editing and manipulation model guidance-distilled from FLUX.1 Kontext [pro
                 )
 
             with gr.Column():
-                gr.Markdown("Select/Upload a face photo ⬇️")
+                gr.Markdown("Step 1. Select/Upload a face photo ⬇️")
                 with gr.Row():
                     overlay_image = gr.Image(label="Upload face photo", type="pil")
                 gr.Examples(
@@ -200,7 +200,7 @@ Image editing and manipulation model guidance-distilled from FLUX.1 Kontext [pro
                 )
 
             with gr.Column():
-                gr.Markdown("Press “Run” to get results")
+                gr.Markdown("Step 1. Press “Run” to get results")
                 with gr.Row():
                     run_button = gr.Button("Run")
                 with gr.Accordion("Advanced Settings", open=False):
@@ -243,17 +243,17 @@ Image editing and manipulation model guidance-distilled from FLUX.1 Kontext [pro
     reuse_button = gr.Button("Reuse this image", visible=False)
 
 
-    examples = gr.Examples(
-        examples=[
-            ["flowers.png", "turn the flowers into sunflowers"],
-            ["monster.png", "make this monster ride a skateboard on the beach"],
-            ["cat.png", "make this cat happy"]
-        ],
-        inputs=[input_image_upload, prompt],
-        outputs=[result, seed],
-        fn=infer_example,
-        cache_examples="lazy"
-    )
+    # examples = gr.Examples(
+    #     examples=[
+    #         ["flowers.png", "turn the flowers into sunflowers"],
+    #         ["monster.png", "make this monster ride a skateboard on the beach"],
+    #         ["cat.png", "make this cat happy"]
+    #     ],
+    #     inputs=[input_image_upload, prompt],
+    #     outputs=[result, seed],
+    #     fn=infer_example,
+    #     cache_examples="lazy"
+    # )
 
     gr.on(
         triggers=[run_button.click, prompt.submit],
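The `gr.on(...)` call kept as context above binds several triggers to a single handler; in recent Gradio releases `gr.on` accepts a list of event listeners via `triggers`. A minimal sketch of that wiring (component names here are illustrative):

import gradio as gr

def greet(name):
    # One handler backs both the button click and the textbox submit,
    # mirroring the run_button.click / prompt.submit pair in app.py.
    return f"Hello, {name}!"

with gr.Blocks() as demo:
    name = gr.Textbox(label="Name")
    run_button = gr.Button("Run")
    result = gr.Textbox(label="Result")

    gr.on(
        triggers=[run_button.click, name.submit],
        fn=greet,
        inputs=[name],
        outputs=[result],
    )

demo.launch()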
@@ -261,10 +261,10 @@ Image editing and manipulation model guidance-distilled from FLUX.1 Kontext [pro
         inputs = [input_image, input_image_upload, overlay_image, prompt, seed, randomize_seed, guidance_scale, steps],
         outputs = [result, result_input, seed, reuse_button]
     )
-    reuse_button.click(
-        fn = lambda image: image,
-        inputs = [result],
-        outputs = [input_image]
-    )
+    # reuse_button.click(
+    #     fn = lambda image: image,
+    #     inputs = [result],
+    #     outputs = [input_image]
+    # )
 
 demo.launch(mcp_server=True)
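One likely motive for giving `prompt` a default value in this commit: the Space launches with `demo.launch(mcp_server=True)`, which in recent Gradio releases (with the `mcp` extra installed) also exposes the app's endpoints as MCP tools, so a tool call that omits `prompt` still has a usable value. A minimal sketch of an MCP-enabled app, assuming `pip install "gradio[mcp]"`:

import gradio as gr

def infer(prompt: str = "make it real") -> str:
    """Describe the edit to apply; the docstring doubles as the MCP tool description."""
    return f"editing with prompt: {prompt}"

demo = gr.Interface(fn=infer, inputs="text", outputs="text")
# mcp_server=True serves the endpoints as MCP tools alongside the UI.
demo.launch(mcp_server=True)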