LPX55 committed on
Commit
60d9af8
·
verified ·
1 Parent(s): dfd6666

Update app_local.py

Browse files
Files changed (1) hide show
  1. app_local.py +8 -8
app_local.py CHANGED
@@ -171,7 +171,7 @@ def polish_prompt(original_prompt: str) -> str:
171
  **model_inputs,
172
  max_new_tokens=256,
173
  do_sample=True,
174
- temperature=0.5,
175
  top_p=0.8,
176
  repetition_penalty=1.1,
177
  no_repeat_ngram_size=3,
@@ -245,9 +245,9 @@ def infer(
245
  prompt,
246
  seed=42,
247
  randomize_seed=False,
248
- true_guidance_scale=1.0,
249
  num_inference_steps=8,
250
- rewrite_prompt=False,
251
  num_images_per_prompt=1,
252
  ):
253
  """Image editing endpoint with optimized prompt handling"""
@@ -284,7 +284,7 @@ def infer(
284
  return pil_image # Return original if resize fails
285
 
286
  # Add noise function for batch variation
287
- def add_noise_to_image(pil_image, noise_level=0.02):
288
  """Add slight noise to image to create variation in outputs"""
289
  try:
290
  if pil_image is None:
@@ -357,10 +357,10 @@ def infer(
357
  generator = torch.Generator(device=device).manual_seed(base_seed + i*1000)
358
 
359
  # Add slight noise to the image for variation
360
- noisy_image = add_noise_to_image(image, noise_level=0.01 + i*0.003)
361
 
362
  # Slightly vary guidance scale
363
- varied_guidance = true_guidance_scale + random.uniform(-0.2, 0.2)
364
  varied_guidance = max(1.0, min(10.0, varied_guidance))
365
 
366
  # Generate single image with variations
@@ -460,7 +460,7 @@ with gr.Blocks(title="Qwen Image Edit - Fast Lightning Mode w/ Batch") as demo:
460
  minimum=1.0,
461
  maximum=10.0,
462
  step=0.1,
463
- value=3.0
464
  )
465
  num_inference_steps = gr.Slider(
466
  label="Inference Steps",
@@ -487,7 +487,7 @@ with gr.Blocks(title="Qwen Image Edit - Fast Lightning Mode w/ Batch") as demo:
487
  preview=True
488
  )
489
  prompt_info = gr.HTML(
490
- value="<div style='padding:15px; background:#f8f9fa; border-radius:8px; margin-top:15px'>"
491
  "Prompt details will appear after generation</div>"
492
  )
493
 
 
171
  **model_inputs,
172
  max_new_tokens=256,
173
  do_sample=True,
174
+ temperature=0.7,
175
  top_p=0.8,
176
  repetition_penalty=1.1,
177
  no_repeat_ngram_size=3,
 
245
  prompt,
246
  seed=42,
247
  randomize_seed=False,
248
+ true_guidance_scale=4.0,
249
  num_inference_steps=8,
250
+ rewrite_prompt=True,
251
  num_images_per_prompt=1,
252
  ):
253
  """Image editing endpoint with optimized prompt handling"""
 
284
  return pil_image # Return original if resize fails
285
 
286
  # Add noise function for batch variation
287
+ def add_noise_to_image(pil_image, noise_level=0.05):
288
  """Add slight noise to image to create variation in outputs"""
289
  try:
290
  if pil_image is None:
 
357
  generator = torch.Generator(device=device).manual_seed(base_seed + i*1000)
358
 
359
  # Add slight noise to the image for variation
360
+ noisy_image = add_noise_to_image(image, noise_level=0.05 + i*0.003)
361
 
362
  # Slightly vary guidance scale
363
+ varied_guidance = true_guidance_scale + random.uniform(-0.5, 0.5)
364
  varied_guidance = max(1.0, min(10.0, varied_guidance))
365
 
366
  # Generate single image with variations
 
460
  minimum=1.0,
461
  maximum=10.0,
462
  step=0.1,
463
+ value=4.0
464
  )
465
  num_inference_steps = gr.Slider(
466
  label="Inference Steps",
 
487
  preview=True
488
  )
489
  prompt_info = gr.HTML(
490
+ value="<div style='padding:15px; margin-top:15px'>"
491
  "Prompt details will appear after generation</div>"
492
  )
493