Commit: fix: preset prompt persistence

Changed file: app_local.py (+62 -57)
@@ -7,7 +7,6 @@ from PIL import Image
 from diffusers import QwenImageEditPipeline, FlowMatchEulerDiscreteScheduler
 from diffusers.utils import is_xformers_available
 from presets import PRESETS, get_preset_choices, get_preset_info, update_preset_prompt
-
 import os
 import sys
 import re
@@ -16,6 +15,8 @@ import math
 import json  # Added json import
 from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
 import logging
+from copy import deepcopy
+
 #############################
 os.environ.setdefault('GRADIO_ANALYTICS_ENABLED', 'False')
 os.environ.setdefault('HF_HUB_DISABLE_TELEMETRY', '1')
@@ -26,14 +27,12 @@ logging.basicConfig(
     handlers=[logging.StreamHandler(sys.stdout)]
 )
 logger = logging.getLogger(__name__)
-
 # Model configuration
 REWRITER_MODEL = "Qwen/Qwen1.5-4B-Chat"  # Upgraded to 4B for better JSON handling
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 MAX_SEED = np.iinfo(np.int32).max
 LOC = os.getenv("QIE")
-
 # Quantization configuration
 bnb_config = BitsAndBytesConfig(
     load_in_4bit=True,
@@ -41,7 +40,6 @@ bnb_config = BitsAndBytesConfig(
     bnb_4bit_quant_type="nf4",
     bnb_4bit_use_double_quant=True
 )
-
rewriter_model = AutoModelForCausalLM.from_pretrained(
     REWRITER_MODEL,
     torch_dtype=dtype,
@@ -49,10 +47,16 @@ rewriter_model = AutoModelForCausalLM.from_pretrained(
     quantization_config=bnb_config,
 )
 
+def get_fresh_presets():
+    """Return a fresh copy of presets to avoid persistence across users"""
+    return deepcopy(PRESETS)
+
+# Store original presets for reference
+ORIGINAL_PRESETS = deepcopy(PRESETS)
+
 # Preload enhancement model at startup
 print("🔄 Loading prompt enhancement model...")
 rewriter_tokenizer = AutoTokenizer.from_pretrained(REWRITER_MODEL)
-
 print("✅ Enhancement model loaded and ready!")
 
 SYSTEM_PROMPT_EDIT = '''
@@ -242,69 +246,78 @@ try:
 except Exception as e:
     print(f"VAE Slicing Failed: {e}")
 
-
+
 def toggle_output_count(preset_type):
     """Control output count slider interactivity and show/hide preset editor"""
-    if preset_type and preset_type in PRESETS:
+    if preset_type and preset_type in ORIGINAL_PRESETS:
         # When preset is selected, disable manual output count and show editor
-        preset = PRESETS[preset_type]
+        preset = ORIGINAL_PRESETS[preset_type]
         prompts = preset["prompts"][:4]  # Get up to 4 prompts
         # Pad prompts to 4 items if needed
         while len(prompts) < 4:
             prompts.append("")
         return (
-            gr.Group(visible=True),
+            gr.Group(visible=True),
             gr.Slider(interactive=False, value=len([p for p in prompts if p.strip()])),  # Count non-empty prompts
             prompts[0], prompts[1], prompts[2], prompts[3]  # Populate preset prompts
         )
     else:
         # When no preset is selected, enable manual output count and hide editor
         return (
-            gr.Group(visible=False),
+            gr.Group(visible=False),
             gr.Slider(interactive=True),  # Enable slider
             "", "", "", ""  # Clear preset prompts
         )
-
+
 def update_prompt_preview(preset_type, base_prompt):
     """Update the prompt preview display based on selected preset and base prompt"""
-    if preset_type and preset_type in PRESETS:
-        preset = PRESETS[preset_type]
+    if preset_type and preset_type in ORIGINAL_PRESETS:
+        preset = ORIGINAL_PRESETS[preset_type]
         non_empty_prompts = [p for p in preset["prompts"] if p.strip()]
-
         if not non_empty_prompts:
             return "No prompts defined. Please enter at least one prompt in the editor."
-
         preview_text = f"**Preset: {preset_type}**\n\n"
         preview_text += f"*{preset['description']}*\n\n"
         preview_text += f"**Generating {len(non_empty_prompts)} image{'s' if len(non_empty_prompts) > 1 else ''}:**\n"
-
         for i, preset_prompt in enumerate(non_empty_prompts, 1):
             full_prompt = f"{base_prompt}, {preset_prompt}"
             preview_text += f"{i}. {full_prompt}\n"
-
         return preview_text
     else:
         return "Select a preset above to see how your base prompt will be modified for batch generation."
 
 def update_preset_prompt_textbox(preset_type, prompt_1, prompt_2, prompt_3, prompt_4):
-    """Update preset prompts based on user input"""
-    if preset_type and preset_type in PRESETS:
-        # Update each prompt in the preset
+    """Update preset prompts based on user input - now works with session copy"""
+    if preset_type and preset_type in ORIGINAL_PRESETS:
+        # Update each prompt in the preset copy (this won't persist globally)
         new_prompts = [prompt_1, prompt_2, prompt_3, prompt_4]
+        # Create a working copy for preview purposes
+        working_presets = get_fresh_presets()
         for i, new_prompt in enumerate(new_prompts):
-            if i < len(PRESETS[preset_type]["prompts"]):
-                PRESETS[preset_type]["prompts"][i] = new_prompt.strip()
+            if i < len(working_presets[preset_type]["prompts"]):
+                working_presets[preset_type]["prompts"][i] = new_prompt.strip()
             else:
-                PRESETS[preset_type]["prompts"].append(new_prompt.strip())
-
-        # Count non-empty prompts for the slider value
-        non_empty_count = len([p for p in new_prompts if p.strip()])
-        non_empty_count = max(1, min(4, non_empty_count))  # Ensure between 1-4
-
+                working_presets[preset_type]["prompts"].append(new_prompt.strip())
         # Return updated preset info for preview
-        return update_prompt_preview(preset_type, "your subject")
+        return update_prompt_preview_with_presets(preset_type, "your subject", working_presets)
     return "Select a preset first to edit its prompts."
 
+def update_prompt_preview_with_presets(preset_type, base_prompt, custom_presets):
+    """Update the prompt preview display with custom presets"""
+    if preset_type and preset_type in custom_presets:
+        preset = custom_presets[preset_type]
+        non_empty_prompts = [p for p in preset["prompts"] if p.strip()]
+        if not non_empty_prompts:
+            return "No prompts defined. Please enter at least one prompt in the editor."
+        preview_text = f"**Preset: {preset_type}**\n\n"
+        preview_text += f"*{preset['description']}*\n\n"
+        preview_text += f"**Generating {len(non_empty_prompts)} image{'s' if len(non_empty_prompts) > 1 else ''}:**\n"
+        for i, preset_prompt in enumerate(non_empty_prompts, 1):
+            full_prompt = f"{base_prompt}, {preset_prompt}"
+            preview_text += f"{i}. {full_prompt}\n"
+        return preview_text
+    else:
+        return "Select a preset above to see how your base prompt will be modified for batch generation."
 
 @spaces.GPU()
 def infer(
@@ -316,10 +329,10 @@ def infer(
     num_inference_steps=4,
     rewrite_prompt=True,
     num_images_per_prompt=1,
-    preset_type=None,
+    preset_type=None,
     progress=gr.Progress(track_tqdm=True),
 ):
-    """Image editing endpoint with optimized prompt handling"""
+    """Image editing endpoint with optimized prompt handling - now uses fresh presets"""
     # Resize image to max 1024px on longest side
     def resize_image(pil_image, max_size=1024):
         """Resize image to maximum dimension of 1024px while maintaining aspect ratio"""
@@ -341,7 +354,7 @@ def infer(
         except Exception as e:
             print(f"⚠️ Image resize failed: {e}")
             return pil_image  # Return original if resize fails
-
+
     # Add noise function for batch variation
     def add_noise_to_image(pil_image, noise_level=0.001):
         """Add slight noise to image to create variation in outputs"""
@@ -360,17 +373,19 @@ def infer(
            print(f"Warning: Could not add noise to image: {e}")
            return pil_image  # Return original if noise addition fails
 
+    # Get fresh presets for this session
+    session_presets = get_fresh_presets()
+
     # Resize input image first
     image = resize_image(image, max_size=1024)
     original_prompt = prompt
     prompt_info = ""
 
     # Handle preset batch generation
-    if preset_type and preset_type in PRESETS:
-        preset = PRESETS[preset_type]
+    if preset_type and preset_type in session_presets:
+        preset = session_presets[preset_type]
         # Filter out empty prompts
         non_empty_preset_prompts = [p for p in preset["prompts"] if p.strip()]
-
         if non_empty_preset_prompts:
             batch_prompts = [f"{original_prompt}, {preset_prompt}" for preset_prompt in non_empty_preset_prompts]
             num_images_per_prompt = len(non_empty_preset_prompts)  # Use actual count of non-empty prompts
@@ -395,7 +410,6 @@ def infer(
        )
    else:
        batch_prompts = [prompt]  # Single prompt in list
-
    # Handle regular prompt rewriting
    if rewrite_prompt:
        try:
@@ -435,25 +449,20 @@ def infer(
 
    # Set base seed for reproducibility
    base_seed = seed if not randomize_seed else random.randint(0, MAX_SEED)
-
    try:
        edited_images = []
-
        # Generate images for each prompt in the batch
        for i, current_prompt in enumerate(batch_prompts):
            # Create unique seed for each image
            generator = torch.Generator(device=device).manual_seed(base_seed + i*1000)
-
            # Add slight noise to the image for variation (except for first image to maintain base)
-            if i == 0 and len(batch_prompts) > 1:
+            if i == 0 and len(batch_prompts) > 1:
                input_image = image
            else:
                input_image = add_noise_to_image(image, noise_level=0.001 + i*0.003)
-
            # Slightly vary guidance scale for each image
            varied_guidance = true_guidance_scale + random.uniform(-0.1, 0.1)
            varied_guidance = max(1.0, min(10.0, varied_guidance))
-
            # Generate single image
            result = pipe(
                image=input_image,
@@ -465,14 +474,11 @@ def infer(
                num_images_per_prompt=1
            ).images
            edited_images.extend(result)
-
            print(f"Generated image {i+1}/{len(batch_prompts)} with prompt: {current_prompt[:75]}...")
-
        # Clear cache after generation
        # if device == "cuda":
        #     torch.cuda.empty_cache()
        #     gc.collect()
-
        return edited_images, base_seed, prompt_info
    except Exception as e:
        # Clear cache on error
@@ -488,7 +494,8 @@ def infer(
 )
 
 with gr.Blocks(title="Qwen Image Edit - Fast Lightning Mode w/ Batch") as demo:
-    preset_prompts_state = gr.State(value=["", "", "", ""])
+    preset_prompts_state = gr.State(value=[])
+    # preset_prompts_state = gr.State(value=["", "", "", ""])
 
     gr.Markdown("""
    <div style="text-align: center; background: linear-gradient(to right, #3a7bd5, #00d2ff); color: white; padding: 20px; border-radius: 8px;">
@@ -602,39 +609,38 @@ with gr.Blocks(title="Qwen Image Edit - Fast Lightning Mode w/ Batch") as demo:
        "Prompt details will appear after generation. Ability to edit Preset Prompts on the fly will be implemented shortly.</div>"
    )
 
-    #
+    # Fix the show_preset_editor function to use ORIGINAL_PRESETS:
    def show_preset_editor(preset_type):
-        if preset_type and preset_type in PRESETS:
-            preset = PRESETS[preset_type]
+        if preset_type and preset_type in ORIGINAL_PRESETS:  # Changed from PRESETS to ORIGINAL_PRESETS
+            preset = ORIGINAL_PRESETS[preset_type]
            prompts = preset["prompts"]
            # Pad prompts to 4 items if needed
            while len(prompts) < 4:
                prompts.append("")
            return gr.Group(visible=True), prompts[0], prompts[1], prompts[2], prompts[3]
        return gr.Group(visible=False), "", "", "", ""
-
+
+    # Fix the update_preset_count function to use ORIGINAL_PRESETS:
    def update_preset_count(preset_type, prompt_1, prompt_2, prompt_3, prompt_4):
        """Update the output count slider based on non-empty preset prompts"""
-        if preset_type and preset_type in PRESETS:
+        if preset_type and preset_type in ORIGINAL_PRESETS:  # Changed from PRESETS to ORIGINAL_PRESETS
            non_empty_count = len([p for p in [prompt_1, prompt_2, prompt_3, prompt_4] if p.strip()])
            return gr.Slider(value=max(1, min(4, non_empty_count)), interactive=False)
        return gr.Slider(interactive=True)
 
-    #
+    # Update the preset_dropdown.change handlers to use ORIGINAL_PRESETS
    preset_dropdown.change(
        fn=toggle_output_count,
        inputs=preset_dropdown,
        outputs=[preset_editor, num_images_per_prompt, preset_prompt_1, preset_prompt_2, preset_prompt_3, preset_prompt_4]
    )
 
-    # Set up prompt preview updates
    preset_dropdown.change(
        fn=update_prompt_preview,
        inputs=[preset_dropdown, prompt],
        outputs=prompt_preview
    )
-
-    # Set up handlers for preset editor changes
+
    preset_prompt_1.change(
        fn=update_preset_count,
        inputs=[preset_dropdown, preset_prompt_1, preset_prompt_2, preset_prompt_3, preset_prompt_4],
@@ -655,14 +661,13 @@ with gr.Blocks(title="Qwen Image Edit - Fast Lightning Mode w/ Batch") as demo:
        inputs=[preset_dropdown, preset_prompt_1, preset_prompt_2, preset_prompt_3, preset_prompt_4],
        outputs=num_images_per_prompt
    )
-
+
    prompt.change(
        fn=update_prompt_preview,
        inputs=[preset_dropdown, prompt],
        outputs=prompt_preview
    )
 
-    # Set up preset editor update button
    update_preset_button.click(
        fn=update_preset_prompt_textbox,
        inputs=[preset_dropdown, preset_prompt_1, preset_prompt_2, preset_prompt_3, preset_prompt_4],
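The diff fixes a classic shared-state bug: a Space serves every visitor from one Python process, so a module-level dict like `PRESETS` is shared across all sessions. The old handlers wrote user edits straight into `PRESETS[preset_type]["prompts"]`, so one user's preset edits persisted for everyone. The commit routes reads through a stable reference copy (`ORIGINAL_PRESETS`) and writes through per-call deep copies (`get_fresh_presets()`). Below is a minimal sketch of the failure mode and the fix, assuming `PRESETS` has the shape used in the diff (a preset name mapped to a dict with `description` and `prompts`); the data here is illustrative, not taken from `presets.py`:

```python
from copy import deepcopy

# Illustrative stand-in for presets.PRESETS (the real module defines more presets).
PRESETS = {
    "portrait": {"description": "Portrait variations", "prompts": ["studio light", "golden hour"]},
}

def update_preset_buggy(preset_type, new_prompt):
    # Old pattern: mutates the module-level dict, so the edit is
    # visible to every other session in the same process.
    PRESETS[preset_type]["prompts"][0] = new_prompt

def update_preset_fixed(preset_type, new_prompt):
    # New pattern: mutate a per-call deep copy; the shared dict stays clean.
    working = deepcopy(PRESETS)
    working[preset_type]["prompts"][0] = new_prompt
    return working

update_preset_buggy("portrait", "user A's edit")
print(PRESETS["portrait"]["prompts"][0])   # "user A's edit" -- leaked to all users

working = update_preset_fixed("portrait", "user B's edit")
print(working["portrait"]["prompts"][0])   # "user B's edit" -- local to this call
print(PRESETS["portrait"]["prompts"][0])   # unchanged by the fixed handler
```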
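Note that `deepcopy` is doing real work here: a shallow copy (`dict(PRESETS)` or `PRESETS.copy()`) duplicates only the outer mapping, while the nested preset dicts and their `prompts` lists remain shared, so in-place edits would still leak. A short demonstration of the difference:

```python
from copy import deepcopy

presets = {"portrait": {"prompts": ["studio light"]}}

shallow = dict(presets)                    # copies only the outer dict
shallow["portrait"]["prompts"][0] = "edited"
print(presets["portrait"]["prompts"][0])   # "edited" -- the inner list was shared

presets = {"portrait": {"prompts": ["studio light"]}}
deep = deepcopy(presets)                   # recursively copies nested containers
deep["portrait"]["prompts"][0] = "edited"
print(presets["portrait"]["prompts"][0])   # "studio light" -- original untouched
```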
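The diff also adds `preset_prompts_state = gr.State(value=[])` inside the Blocks context but does not yet wire it to any handler (the commented-out line suggests a four-slot variant was considered). `gr.State` is Gradio's per-session store, so a follow-up could keep edited preset prompts there instead of in any module-level structure. A hedged sketch of that pattern; the component names below are illustrative, not the ones used in `app_local.py`:

```python
import gradio as gr

with gr.Blocks() as demo:
    # One value per browser session; never shared across users.
    prompts_state = gr.State(value=["", "", "", ""])

    prompt_box = gr.Textbox(label="Preset prompt 1")
    save_btn = gr.Button("Save prompt")
    preview = gr.Markdown()

    def save_prompt(prompts, new_value):
        prompts = list(prompts)        # copy so the stored list is not mutated in place
        prompts[0] = new_value.strip()
        return prompts, f"Session prompts: {prompts}"

    # State goes in as an input and comes back as an output; Gradio keeps
    # a separate copy per session, so edits never leak between users.
    save_btn.click(save_prompt,
                   inputs=[prompts_state, prompt_box],
                   outputs=[prompts_state, preview])

demo.launch()
```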