Update app.py
app.py CHANGED
@@ -126,12 +126,12 @@ def classify_gallery(flux_loras):
     sorted_gallery = sorted(flux_loras, key=lambda x: x.get("likes", 0), reverse=True)
     return [(item["image"], item["title"]) for item in sorted_gallery], sorted_gallery
 
-def infer_with_lora_wrapper(input_image, prompt, selected_index, custom_lora, seed=42, randomize_seed=False, guidance_scale=2.5, lora_scale=1.75, flux_loras=None, progress=gr.Progress(track_tqdm=True)):
+def infer_with_lora_wrapper(input_image, prompt, selected_index, custom_lora, seed=42, randomize_seed=False, steps=28, guidance_scale=2.5, lora_scale=1.75, flux_loras=None, progress=gr.Progress(track_tqdm=True)):
     """Wrapper function to handle state serialization"""
-    return infer_with_lora(input_image, prompt, selected_index, custom_lora, seed, randomize_seed, guidance_scale, lora_scale, flux_loras, progress)
+    return infer_with_lora(input_image, prompt, selected_index, custom_lora, seed, randomize_seed, steps, guidance_scale, lora_scale, flux_loras, progress)
 
 @spaces.GPU
-def infer_with_lora(input_image, prompt, selected_index, custom_lora, seed=42, randomize_seed=False, guidance_scale=2.5, lora_scale=1.0, flux_loras=None, progress=gr.Progress(track_tqdm=True)):
+def infer_with_lora(input_image, prompt, selected_index, custom_lora, seed=42, randomize_seed=False, steps=28, guidance_scale=2.5, lora_scale=1.0, flux_loras=None, progress=gr.Progress(track_tqdm=True)):
     """Generate image with selected LoRA"""
     global current_lora, pipe
 
@@ -181,6 +181,7 @@ def infer_with_lora(input_image, prompt, selected_index, custom_lora, seed=42, r
             image=input_image,
             prompt=prompt,
             guidance_scale=guidance_scale,
+            num_inference_steps=steps,
             generator=torch.Generator().manual_seed(seed),
         ).images[0]
 
@@ -291,6 +292,13 @@ with gr.Blocks(css=css) as demo:
                     step=1,
                     value=0,
                 )
+                steps = gr.Slider(
+                    label="Steps",
+                    minimum=1,
+                    maximum=40,
+                    value=28,
+                    step=1
+                )
                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                 guidance_scale = gr.Slider(
                     label="Guidance Scale",
@@ -328,7 +336,7 @@ with gr.Blocks(css=css) as demo:
     gr.on(
         triggers=[run_button.click, prompt.submit],
         fn=infer_with_lora_wrapper,
-        inputs=[input_image, prompt, selected_state, custom_loaded_lora, seed, randomize_seed, guidance_scale, lora_scale, gr_flux_loras],
+        inputs=[input_image, prompt, selected_state, custom_loaded_lora, seed, randomize_seed, steps, guidance_scale, lora_scale, gr_flux_loras],
         outputs=[result, seed, reuse_button]
     )
 
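For context on the wiring pattern this commit follows: a new Gradio component only reaches the inference function if its value is listed in the handler's inputs in the same position the parameter occupies in the function signature, and the function then forwards it to the pipeline as num_inference_steps. The sketch below is a minimal, self-contained illustration of that pattern, not the Space's code; fake_pipe is a hypothetical stand-in for the FLUX pipeline (pipe) that app.py loads elsewhere.

import gradio as gr

def fake_pipe(prompt, num_inference_steps, guidance_scale):
    # Hypothetical stand-in for the diffusers pipeline call; in app.py this is
    # pipe(image=..., prompt=..., guidance_scale=..., num_inference_steps=steps, ...).images[0]
    return f"{prompt!r} rendered with {num_inference_steps} steps at guidance {guidance_scale}"

def infer(prompt, steps=28, guidance_scale=2.5):
    # The slider value arrives positionally, matching its slot in gr.on(inputs=[...]),
    # and is forwarded to the pipeline under the keyword it expects.
    return fake_pipe(prompt, num_inference_steps=steps, guidance_scale=guidance_scale)

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    steps = gr.Slider(label="Steps", minimum=1, maximum=40, value=28, step=1)
    guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, value=2.5, step=0.1)
    run_button = gr.Button("Run")
    result = gr.Textbox(label="Result")

    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[prompt, steps, guidance_scale],  # order must match infer's parameters
        outputs=[result],
    )

if __name__ == "__main__":
    demo.launch()

The slider bounds (1 to 40) and the default of 28 in the sketch simply mirror the values the commit adds for the new steps control.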