alexander00001 committed
Commit 9e9fcb3 · verified · 1 Parent(s): e9cdb9a

Update app.py

Files changed (1)
  1. app.py +373 -344
app.py CHANGED
@@ -19,103 +19,184 @@ from typing import Optional, List, Dict
19
  import numpy as np
20
 
21
  # ======================
22
- # Configuration Section (Modify here to expand)
23
  # ======================
24
 
25
- # 1. Base Model - Using reliable SDXL models (avoiding incomplete "bait" models)
26
- BASE_MODELS = {
27
- "realistic":"John6666/wai-nsfw-illustrious-v80-sdxl",
28
- "sdxl_base": "stabilityai/stable-diffusion-xl-base-1.0", # Most reliable choice
29
- "realistic_vision": "SG161222/RealVisXL_V4.0", # High-quality realistic model
30
- "anime_xl": "Linaqruf/animagine-xl-3.1", # Popular anime-style SDXL
31
- "juggernaut_xl": "RunDiffusion/Juggernaut-XL-v9", # High-quality general purpose
32
- "playground_v2": "playgroundai/playground-v2.5-1024px-aesthetic" # Aesthetic focused
33
  }
34
 
35
- # Current model selection (change this to switch models)
36
- CURRENT_MODEL_KEY = "realistic" # Changed to working model
37
- BASE_MODEL = BASE_MODELS[CURRENT_MODEL_KEY]
38
 
39
- # 2. Fixed LoRAs (Auto-loaded, not user-selectable) - Using actual LoRA models
40
  FIXED_LORAS = {
41
  "detail_enhancer": {
42
- "repo_id": "ostris/ikea-instructions-lora-sdxl", # Real LoRA for details
43
  "filename": None,
44
- "weight": 0.6,
45
- "trigger_words": "high quality, detailed, sharp focus"
 
46
  },
47
  "quality_boost": {
48
- "repo_id": "stabilityai/stable-diffusion-xl-offset-example-lora", # Official SDXL LoRA
49
  "filename": None,
50
- "weight": 0.5,
51
- "trigger_words": "masterpiece, best quality"
 
52
  }
53
  }
54
 
55
- # 3. Style Templates (Auto-prepended to user prompts)
56
  STYLE_PROMPTS = {
57
  "None": "",
58
- "Realistic": "photorealistic, ultra-detailed skin, natural lighting, 8k uhd, professional photography, DSLR, soft lighting, high quality, film grain, Fujifilm XT3, masterpiece, ",
59
- "Anime": "anime style, cel shading, vibrant colors, detailed eyes, studio ghibli style, manga style, trending on pixiv, masterpiece, ",
60
- "Comic": "comic book style, bold outlines, dynamic angles, comic panel, Marvel DC style, inked lines, pop art, masterpiece, ",
61
- "Watercolor": "watercolor painting, soft brush strokes, translucent layers, artistic, painterly, paper texture, traditional art, masterpiece, ",
 
 
62
  }
63
 
64
- # 4. Optional LoRAs (User-selectable via dropdown, can select multiple) - Using real, verified LoRAs
65
  OPTIONAL_LORAS = {
66
  "None": {
67
  "repo_id": None,
68
  "weight": 0.0,
69
  "trigger_words": "",
70
- "description": "No additional LoRA"
71
  },
72
- "Offset Noise LoRA": {
73
  "repo_id": "stabilityai/stable-diffusion-xl-offset-example-lora",
74
  "weight": 0.7,
75
  "trigger_words": "high contrast, dramatic lighting",
76
- "description": "Enhanced contrast and lighting (Official Stability AI)"
77
  },
78
  "LCM LoRA": {
79
  "repo_id": "latent-consistency/lcm-lora-sdxl",
80
  "weight": 0.8,
81
- "trigger_words": "lcm style, high quality",
82
- "description": "Latent Consistency Model for faster generation"
83
  },
84
- "Pixel Art LoRA": {
85
  "repo_id": "nerijs/pixel-art-xl",
86
  "weight": 0.9,
87
- "trigger_words": "pixel art style, 8bit, retro game",
88
- "description": "Pixel art style transformation"
89
  },
90
- "Watercolor LoRA": {
91
  "repo_id": "ostris/watercolor-style-lora-sdxl",
92
  "weight": 0.8,
93
- "trigger_words": "watercolor painting, soft colors, artistic",
94
- "description": "Watercolor painting style"
95
  },
96
- "Sketch LoRA": {
97
  "repo_id": "ostris/crayon-style-lora-sdxl",
98
  "weight": 0.7,
99
- "trigger_words": "sketch style, pencil drawing, artistic",
100
- "description": "Hand-drawn sketch style"
101
  },
102
- "Portrait LoRA": {
103
  "repo_id": "ostris/face-helper-sdxl-lora",
104
  "weight": 0.8,
105
  "trigger_words": "portrait, beautiful face, detailed eyes",
106
- "description": "Portrait and face enhancement"
107
  }
108
  }
109
 
110
- # Default Parameters
111
  DEFAULT_SEED = -1
112
  DEFAULT_WIDTH = 1024
113
  DEFAULT_HEIGHT = 1024
114
  DEFAULT_LORA_SCALE = 0.8
115
- DEFAULT_STEPS = 30
116
- DEFAULT_CFG = 7.5
117
 
118
- # Supported Languages (for future expansion)
119
  SUPPORTED_LANGUAGES = {
120
  "en": "English",
121
  "zh": "中文",
@@ -124,64 +205,75 @@ SUPPORTED_LANGUAGES = {
124
  }
125
 
126
  # ======================
127
- # Global Variables: Lazy Loading
128
  # ======================
129
  pipe = None
 
130
  current_loras = {}
131
  device = "cuda" if torch.cuda.is_available() else "cpu"
132
 
133
- def load_pipeline():
134
- """Load the base Illustrious XL pipeline with fallback options"""
135
- global pipe
136
- if pipe is None:
137
- print(f"🚀 Loading base model: {BASE_MODEL}...")
138
-
139
- # Try to load the selected model with fallback options
140
- model_loaded = False
141
- models_to_try = [BASE_MODEL]
142
-
143
- # Add fallback models if primary fails
144
- if CURRENT_MODEL_KEY != "sdxl_base":
145
- models_to_try.append(BASE_MODELS["sdxl_base"])
146
- if CURRENT_MODEL_KEY != "realistic_vision":
147
- models_to_try.append(BASE_MODELS["realistic_vision"])
 
148
 
149
- for model_id in models_to_try:
150
- try:
151
- print(f"Attempting to load: {model_id}")
152
- pipe = StableDiffusionXLPipeline.from_pretrained(
153
- model_id,
154
- torch_dtype=torch.float16,
155
- use_safetensors=True,
156
- variant="fp16"
157
- ).to(device)
158
-
159
- # Enable memory optimizations for ZeroGPU
160
- pipe.enable_attention_slicing()
161
- pipe.enable_vae_slicing()
162
- if hasattr(pipe, 'enable_model_cpu_offload'):
163
- pipe.enable_model_cpu_offload()
164
- if hasattr(pipe, 'enable_xformers_memory_efficient_attention'):
165
  pipe.enable_xformers_memory_efficient_attention()
166
-
167
- print(f"✅ Successfully loaded: {model_id}")
168
- model_loaded = True
169
- break
170
-
171
- except Exception as e:
172
- print(f"❌ Failed to load {model_id}: {e}")
173
- continue
174
-
175
- if not model_loaded:
176
- raise Exception("Failed to load any model. Please check your configuration.")
177
 
178
- return pipe
179
 
180
  def unload_pipeline():
181
- """Unload pipeline to free memory"""
182
- global pipe, current_loras
183
  if pipe is not None:
184
- # Clear any loaded LoRAs
185
  try:
186
  pipe.unload_lora_weights()
187
  except:
@@ -190,16 +282,17 @@ def unload_pipeline():
190
  torch.cuda.empty_cache()
191
  pipe = None
192
  current_loras = {}
193
- print("🗑️ Pipeline unloaded.")
 
194
 
195
  def load_lora_weights(lora_configs: List[Dict]):
196
- """Load multiple LoRA weights efficiently with error handling"""
197
  global pipe, current_loras
198
 
199
  if not lora_configs:
200
  return
201
 
202
- # Unload existing LoRAs if different
203
  new_lora_ids = [config['repo_id'] for config in lora_configs if config['repo_id']]
204
  if set(current_loras.keys()) != set(new_lora_ids):
205
  try:
@@ -208,57 +301,48 @@ def load_lora_weights(lora_configs: List[Dict]):
208
  except:
209
  pass
210
 
211
- # Load new LoRAs with better error handling
212
  adapter_names = []
213
  adapter_weights = []
214
 
215
  for config in lora_configs:
216
  if config['repo_id'] and config['repo_id'] not in current_loras:
217
  try:
218
- # Try different loading methods
219
  adapter_name = config['name'].replace(' ', '_').lower()
220
-
221
- # Method 1: Direct loading
222
  pipe.load_lora_weights(
223
  config['repo_id'],
224
  adapter_name=adapter_name
225
  )
226
  current_loras[config['repo_id']] = adapter_name
227
- print(f"✅ Loaded LoRA: {config['name']}")
228
-
229
  except Exception as e:
230
- print(f"⚠️ Failed to load LoRA {config['name']}: {e}")
231
- # Skip this LoRA and continue with others
232
  continue
233
 
234
- # Add to active adapters if successfully loaded
235
  if config['repo_id'] in current_loras:
236
  adapter_names.append(current_loras[config['repo_id']])
237
  adapter_weights.append(config['weight'])
238
 
239
- # Set adapter weights if any adapters loaded
240
  if adapter_names:
241
  try:
242
  pipe.set_adapters(adapter_names, adapter_weights=adapter_weights)
243
- print(f"✅ Activated {len(adapter_names)} LoRA adapters")
244
  except Exception as e:
245
- print(f"⚠️ Warning setting adapter weights: {e}")
246
- # Try without weights
247
  try:
248
  pipe.set_adapters(adapter_names)
249
  except:
250
- print("❌ Failed to set any adapters")
251
 
252
  def process_long_prompt(prompt: str, max_length: int = 77) -> str:
253
- """Process long prompts by intelligent truncation and optimization"""
254
  if len(prompt.split()) <= max_length:
255
  return prompt
256
 
257
- # Split into sentences and prioritize
258
  sentences = re.split(r'[.!?]+', prompt)
259
  sentences = [s.strip() for s in sentences if s.strip()]
260
 
261
- # Keep most important parts (first sentence + key descriptors)
262
  if sentences:
263
  result = sentences[0]
264
  remaining = max_length - len(result.split())
@@ -269,7 +353,6 @@ def process_long_prompt(prompt: str, max_length: int = 77) -> str:
269
  result += ". " + sentence
270
  remaining -= len(words)
271
  else:
272
- # Add partial sentence with most important words
273
  important_words = [w for w in words if len(w) > 3][:remaining]
274
  if important_words:
275
  result += ". " + " ".join(important_words)
@@ -280,10 +363,11 @@ def process_long_prompt(prompt: str, max_length: int = 77) -> str:
280
  return " ".join(prompt.split()[:max_length])
281
 
282
  # ======================
283
- # Main Generation Function
284
  # ======================
285
  @spaces.GPU(duration=60) if SPACES_AVAILABLE else lambda x: x
286
  def generate_image(
 
287
  prompt: str,
288
  negative_prompt: str,
289
  style: str,
@@ -294,41 +378,48 @@ def generate_image(
294
  lora_scale: float,
295
  steps: int,
296
  cfg_scale: float,
 
297
  language: str = "en"
298
  ):
299
- """Main image generation function with ZeroGPU optimization"""
300
  global pipe
301
 
302
  try:
303
- # Load pipeline
304
- pipe = load_pipeline()
 
305
 
306
- # Handle seed
307
  if seed == -1:
308
  seed = torch.randint(0, 2**32, (1,)).item()
309
  generator = torch.Generator(device=device).manual_seed(seed)
310
 
311
- # Process prompts
312
  style_prefix = STYLE_PROMPTS.get(style, "")
313
  processed_prompt = process_long_prompt(style_prefix + prompt, max_length=150)
 
 
 
 
314
  processed_negative = process_long_prompt(negative_prompt, max_length=100)
315
 
316
- # Prepare LoRA configurations
317
  lora_configs = []
318
  active_trigger_words = []
319
 
320
- # Add fixed LoRAs
321
- for name, config in FIXED_LORAS.items():
322
- if config["repo_id"]:
323
- lora_configs.append({
324
- 'name': name,
325
- 'repo_id': config["repo_id"],
326
- 'weight': config["weight"]
327
- })
328
- if config["trigger_words"]:
329
- active_trigger_words.append(config["trigger_words"])
 
330
 
331
- # Add selected optional LoRAs
332
  for lora_name in selected_loras:
333
  if lora_name != "None" and lora_name in OPTIONAL_LORAS:
334
  config = OPTIONAL_LORAS[lora_name]
@@ -341,17 +432,17 @@ def generate_image(
341
  if config["trigger_words"]:
342
  active_trigger_words.append(config["trigger_words"])
343
 
344
- # Load LoRAs
345
  load_lora_weights(lora_configs)
346
 
347
- # Combine trigger words with prompt
348
  if active_trigger_words:
349
  trigger_text = ", ".join(active_trigger_words)
350
  final_prompt = f"{processed_prompt}, {trigger_text}"
351
  else:
352
  final_prompt = processed_prompt
353
 
354
- # Generate image
355
  with torch.autocast(device):
356
  image = pipe(
357
  prompt=final_prompt,
@@ -363,15 +454,17 @@ def generate_image(
363
  generator=generator,
364
  ).images[0]
365
 
366
- # Generate metadata
367
  timestamp = datetime.datetime.now()
368
  metadata = {
 
 
369
  "prompt": final_prompt,
370
  "original_prompt": prompt,
371
  "negative_prompt": processed_negative,
372
- "base_model": BASE_MODEL,
373
  "style": style,
374
- "fixed_loras": [name for name in FIXED_LORAS.keys()],
 
375
  "selected_loras": [name for name in selected_loras if name != "None"],
376
  "lora_scale": lora_scale,
377
  "seed": seed,
@@ -384,301 +477,237 @@ def generate_image(
384
  "trigger_words": active_trigger_words
385
  }
386
 
387
- # Generate filenames
388
- timestamp_str = timestamp.strftime("%y%m%d%H%M")
389
- filename_base = f"{seed}-{timestamp_str}"
390
-
391
- # Save image as WebP
392
- img_buffer = io.BytesIO()
393
- image.save(img_buffer, format="WEBP", quality=95, method=6)
394
- img_buffer.seek(0)
395
-
396
- # Save metadata as JSON
397
  metadata_str = json.dumps(metadata, indent=2, ensure_ascii=False)
398
 
399
  return (
400
  image,
401
- metadata_str
 
402
  )
403
 
404
  except Exception as e:
405
- error_msg = f"Generation failed: {str(e)}"
406
  print(f"❌ {error_msg}")
407
- return None, error_msg
408
 
409
  # ======================
410
- # Gradio Interface
411
  # ======================
412
  def create_interface():
413
- """Create the Gradio interface"""
414
 
415
  with gr.Blocks(
416
  theme=gr.themes.Soft(
417
- primary_hue="indigo",
418
- secondary_hue="green",
419
  neutral_hue="slate",
420
- ).set(
421
- body_background_fill="linear-gradient(135deg, #1e40af, #059669)",
422
- button_primary_background_fill="white",
423
- button_primary_text_color="#1e40af",
424
- input_background_fill="rgba(255,255,255,0.9)",
425
- block_background_fill="rgba(255,255,255,0.1)",
426
  ),
427
  css="""
428
- body {
429
- font-family: 'Segoe UI', 'Arial', sans-serif;
430
- background: linear-gradient(135deg, #1e40af, #059669);
431
- }
432
- .gr-button {
433
- font-family: 'Segoe UI', 'Arial', sans-serif;
434
- font-weight: 600;
435
- border-radius: 8px;
436
- }
437
- .gr-textbox {
438
- font-family: 'Consolas', 'Monaco', 'Courier New', monospace;
439
- border-radius: 8px;
440
- }
441
- .gr-dropdown, .gr-slider, .gr-radio {
442
- border-radius: 8px;
443
  }
444
- .gr-form {
445
  background: rgba(255,255,255,0.05);
446
- border-radius: 16px;
447
- padding: 20px;
448
- margin: 10px;
449
  }
450
  """,
451
- title="AI Photo Generator - Illustrious XL"
452
  ) as demo:
453
 
454
  gr.Markdown("""
455
- # 🎨 AI Photo Generator (Illustrious XL + Multi-LoRA)
456
- """)
 
457
 
458
  with gr.Row():
459
- # Left Column - Controls
460
- with gr.Column(scale=3, elem_classes=["gr-form"]):
461
-
462
- # a. Prompt Input
463
- prompt_input = gr.Textbox(
464
- label="Prompt (Positive)",
465
- placeholder="A beautiful woman with flowing hair, golden hour lighting, cinematic composition, high detail...",
466
- lines=6,
467
- max_lines=20,
468
- elem_classes=["gr-textbox"]
469
- )
470
 
471
- # b. Negative Prompt Input
472
- negative_prompt_input = gr.Textbox(
473
- label="Negative Prompt",
474
- value="blurry, low quality, deformed, cartoon, anime, text, watermark, signature, username, worst quality, low res, bad anatomy, bad hands, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, bad feet, extra fingers, mutated hands, poorly drawn hands, bad proportions, extra limbs, disfigured, ugly, gross proportions, malformed limbs",
475
- lines=4,
476
- max_lines=15,
477
- elem_classes=["gr-textbox"]
478
- )
 
 
479
 
480
- # c. Style Selection
481
- style_radio = gr.Radio(
482
- choices=list(STYLE_PROMPTS.keys()),
483
- label="Style Template",
484
- value="Realistic",
485
- elem_classes=["gr-radio"]
486
- )
487
 
488
- # Multi-row controls
489
- with gr.Row():
490
- # d. Seed Control
491
- with gr.Column():
 
492
  seed_input = gr.Slider(
493
  minimum=-1,
494
  maximum=99999999,
495
  step=1,
496
  value=DEFAULT_SEED,
497
- label="Seed (-1 = Random)"
498
  )
499
- # seed_reset = gr.Button("Reset Seed", size="sm")
500
-
501
- with gr.Row():
502
- # e. Width Control
503
- with gr.Column():
504
  width_input = gr.Slider(
505
  minimum=512,
506
  maximum=1536,
507
  step=64,
508
  value=DEFAULT_WIDTH,
509
- label="Width"
510
  )
511
- # width_reset = gr.Button("Reset Width", size="sm")
512
-
513
- # f. Height Control
514
- with gr.Column():
515
  height_input = gr.Slider(
516
  minimum=512,
517
  maximum=1536,
518
  step=64,
519
  value=DEFAULT_HEIGHT,
520
- label="Height"
521
  )
522
- # height_reset = gr.Button("Reset Height", size="sm")
523
-
524
- # g. LoRA Selection (Multi-select)
525
- lora_dropdown = gr.Dropdown(
526
- choices=list(OPTIONAL_LORAS.keys()),
527
- label="Optional LoRAs (Multi-select)",
528
- value=["None"],
529
- multiselect=True,
530
- elem_classes=["gr-dropdown"]
531
- )
532
 
533
- # h. LoRA Scale Control
534
- with gr.Row():
535
  lora_scale_slider = gr.Slider(
536
  minimum=0.0,
537
  maximum=1.5,
538
  step=0.05,
539
  value=DEFAULT_LORA_SCALE,
540
- label="LoRA Scale"
541
  )
542
- # lora_reset = gr.Button("Reset LoRA", size="sm")
543
 
544
- # i. Generation Controls
545
- with gr.Row():
546
- steps_slider = gr.Slider(
547
- minimum=10,
548
- maximum=100,
549
- step=1,
550
- value=DEFAULT_STEPS,
551
- label="Steps"
552
- )
553
- cfg_slider = gr.Slider(
554
- minimum=1.0,
555
- maximum=20.0,
556
- step=0.1,
557
- value=DEFAULT_CFG,
558
- label="CFG Scale"
559
- )
560
- # gen_reset = gr.Button("Reset Generation", size="sm")
561
-
562
- # Language Selection (Optional)
563
- language_dropdown = gr.Dropdown(
564
- choices=list(SUPPORTED_LANGUAGES.keys()),
565
- label="Language (Optional)",
566
- value="en",
567
- visible=True # Hidden for now, can be enabled later
568
- )
569
-
570
- # m. Generate Button
571
  generate_btn = gr.Button(
572
- "✨ Generate Image",
573
  variant="primary",
574
- size="lg",
575
- elem_classes=["gr-button"]
576
  )
577
 
578
- # Right Column - Outputs
579
  with gr.Column(scale=2):
580
- # j. Image Display
581
  image_output = gr.Image(
582
- label="Generated Image",
583
- height=600,
584
  format="webp"
585
  )
586
 
587
- # Simplified UI without complex download buttons
588
- with gr.Row():
589
- gr.Markdown("**Right-click the image above to download**")
590
 
591
- # k. Metadata Display
592
  metadata_output = gr.Textbox(
593
- label="Generation Metadata (JSON)",
594
  lines=15,
595
- max_lines=25,
596
- elem_classes=["gr-textbox"]
597
  )
598
 
599
  # ======================
600
- # Event Handlers
601
  # ======================
602
 
603
- # Reset buttons
604
- # seed_reset.click(fn=lambda: -1, outputs=seed_input)
605
- # width_reset.click(fn=lambda: DEFAULT_WIDTH, outputs=width_input)
606
- # height_reset.click(fn=lambda: DEFAULT_HEIGHT, outputs=height_input)
607
- # lora_reset.click(fn=lambda: DEFAULT_LORA_SCALE, outputs=lora_scale_slider)
608
- # gen_reset.click(
609
- # fn=lambda: (DEFAULT_STEPS, DEFAULT_CFG),
610
- # outputs=[steps_slider, cfg_slider]
611
- # )
612
 
613
- # Main generation function
614
- def generate_and_prepare_downloads(*args):
615
- result = generate_image(*args)
616
- if result[0] is not None: # Success
617
- image, metadata, img_filename, meta_filename = result
618
-
619
- # Save files temporarily for download
620
- import tempfile
621
- import os
622
-
623
- # Create temporary files
624
- temp_dir = tempfile.mkdtemp()
625
- img_path = os.path.join(temp_dir, img_filename)
626
- meta_path = os.path.join(temp_dir, meta_filename)
627
-
628
- # Save image
629
- image.save(img_path, format="WEBP", quality=95)
630
-
631
- # Save metadata
632
- with open(meta_path, 'w', encoding='utf-8') as f:
633
- f.write(metadata)
634
-
635
- return (
636
- image,
637
- metadata,
638
- img_path, # File path for download
639
- meta_path # File path for download
640
- )
641
- else: # Error
642
- return result[0], result[1], None, None
643
 
644
- # Generate button click - Simplified without complex downloads
645
  generate_btn.click(
646
  fn=generate_image,
647
  inputs=[
648
- prompt_input, negative_prompt_input, style_radio,
649
  seed_input, width_input, height_input,
650
  lora_dropdown, lora_scale_slider,
651
- steps_slider, cfg_slider, language_dropdown
 
652
  ],
653
  outputs=[
654
- image_output, metadata_output
655
  ]
656
  )
657
-
658
- # Show LoRA descriptions
659
- def show_lora_info(selected_loras):
660
- if not selected_loras or selected_loras == ["None"]:
661
- return "No LoRAs selected"
662
-
663
- info = "Selected LoRAs:\n"
664
- for lora_name in selected_loras:
665
- if lora_name in OPTIONAL_LORAS:
666
- config = OPTIONAL_LORAS[lora_name]
667
- info += f"• {lora_name}: {config['description']}\n"
668
- if config['trigger_words']:
669
- info += f" Triggers: {config['trigger_words']}\n"
670
- return info
671
-
672
- lora_dropdown.change(
673
- fn=show_lora_info,
674
- inputs=[lora_dropdown],
675
- outputs=[gr.Textbox(label="LoRA Information", visible=False)]
676
- )
677
 
678
  return demo
679
 
680
  # ======================
681
- # Launch Application
682
  # ======================
683
  if __name__ == "__main__":
684
  demo = create_interface()
 
19
  import numpy as np
20
 
21
  # ======================
22
+ # Configuration Section - flexible model configuration
23
  # ======================
24
 
25
+ # 1. Model configuration dictionary - supports multiple model types
26
+ MODEL_CONFIGS = {
27
+ "wai_nsfw_illustrious_v80": {
28
+ "repo_id": "John6666/wai-nsfw-illustrious-v80-sdxl",
29
+ "type": "sdxl", # SDXL architecture
30
+ "requires_safety_checker": False,
31
+ "default_negative": "lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry",
32
+ "optimal_settings": {
33
+ "steps": 28,
34
+ "cfg": 7.0,
35
+ "sampler": "DPM++ 2M Karras"
36
+ },
37
+ "description": "WAI NSFW Illustrious v8.0 - high-quality illustration-style model"
38
+ },
39
+ "wai_nsfw_illustrious_v90": {
40
+ "repo_id": "John6666/wai-nsfw-illustrious-v90-sdxl",
41
+ "type": "sdxl",
42
+ "requires_safety_checker": False,
43
+ "default_negative": "lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry",
44
+ "optimal_settings": {
45
+ "steps": 28,
46
+ "cfg": 7.0,
47
+ "sampler": "DPM++ 2M Karras"
48
+ },
49
+ "description": "WAI NSFW Illustrious v9.0 - latest version"
50
+ },
51
+ "wai_nsfw_illustrious_v110": {
52
+ "repo_id": "John6666/wai-nsfw-illustrious-v110-sdxl",
53
+ "type": "sdxl",
54
+ "requires_safety_checker": False,
55
+ "default_negative": "lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry",
56
+ "optimal_settings": {
57
+ "steps": 30,
58
+ "cfg": 7.5,
59
+ "sampler": "DPM++ 2M Karras"
60
+ },
61
+ "description": "WAI NSFW Illustrious v11.0 - enhanced version"
62
+ },
63
+ "sdxl_base": {
64
+ "repo_id": "stabilityai/stable-diffusion-xl-base-1.0",
65
+ "type": "sdxl",
66
+ "requires_safety_checker": True,
67
+ "default_negative": "blurry, low quality, deformed, cartoon, anime, text, watermark, signature, username, worst quality, low res, bad anatomy, bad hands",
68
+ "optimal_settings": {
69
+ "steps": 30,
70
+ "cfg": 7.5,
71
+ "sampler": "Default"
72
+ },
73
+ "description": "Stable Diffusion XL Base 1.0 - official base model"
74
+ },
75
+ "realistic_vision": {
76
+ "repo_id": "SG161222/RealVisXL_V4.0",
77
+ "type": "sdxl",
78
+ "requires_safety_checker": False,
79
+ "default_negative": "blurry, low quality, deformed, text, watermark, signature, worst quality, bad anatomy",
80
+ "optimal_settings": {
81
+ "steps": 30,
82
+ "cfg": 7.5,
83
+ "sampler": "Default"
84
+ },
85
+ "description": "RealVisXL V4.0 - high-quality realistic style"
86
+ },
87
+ "anime_xl": {
88
+ "repo_id": "Linaqruf/animagine-xl-3.1",
89
+ "type": "sdxl",
90
+ "requires_safety_checker": False,
91
+ "default_negative": "lowres, bad anatomy, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated",
92
+ "optimal_settings": {
93
+ "steps": 28,
94
+ "cfg": 7.0,
95
+ "sampler": "Default"
96
+ },
97
+ "description": "Animagine XL 3.1 - anime style"
98
+ },
99
+ "juggernaut_xl": {
100
+ "repo_id": "RunDiffusion/Juggernaut-XL-v9",
101
+ "type": "sdxl",
102
+ "requires_safety_checker": False,
103
+ "default_negative": "blurry, low quality, text, watermark, signature, worst quality",
104
+ "optimal_settings": {
105
+ "steps": 30,
106
+ "cfg": 7.5,
107
+ "sampler": "Default"
108
+ },
109
+ "description": "Juggernaut XL v9 - general-purpose high-quality model"
110
+ }
111
  }
112
 
113
+ # Default model - can be switched from the UI
114
+ DEFAULT_MODEL_KEY = "wai_nsfw_illustrious_v80"
 
115
 
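Every MODEL_CONFIGS entry shares the same shape, so extending the app to another checkpoint is one more dict literal; a minimal sketch (the repo id below is a placeholder, not a verified model):

# Hypothetical example of extending MODEL_CONFIGS; "your-org/your-sdxl-checkpoint" is a placeholder.
MODEL_CONFIGS["my_custom_sdxl"] = {
    "repo_id": "your-org/your-sdxl-checkpoint",   # must be an SDXL checkpoint on the Hub
    "type": "sdxl",                               # only "sdxl" is handled by load_pipeline()
    "requires_safety_checker": False,
    "default_negative": "lowres, bad anatomy, worst quality, watermark",
    "optimal_settings": {"steps": 28, "cfg": 7.0, "sampler": "Default"},
    "description": "My custom SDXL checkpoint",
}
# The model dropdown in create_interface() picks this up automatically,
# because its choices are built from MODEL_CONFIGS.items().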
116
+ # 2. Fixed LoRA configuration - loaded automatically
117
  FIXED_LORAS = {
118
  "detail_enhancer": {
119
+ "repo_id": "ostris/ikea-instructions-lora-sdxl",
120
  "filename": None,
121
+ "weight": 0.5, # lower weight to avoid over-influencing the output
122
+ "trigger_words": "high quality, detailed",
123
+ "enabled": True # can be disabled
124
  },
125
  "quality_boost": {
126
+ "repo_id": "stabilityai/stable-diffusion-xl-offset-example-lora",
127
  "filename": None,
128
+ "weight": 0.4,
129
+ "trigger_words": "masterpiece, best quality",
130
+ "enabled": True
131
  }
132
  }
133
 
134
+ # 3. Style templates - tuned for the different models
135
  STYLE_PROMPTS = {
136
  "None": "",
137
+ "Realistic Photo": "photorealistic, ultra-detailed, natural lighting, 8k uhd, professional photography, DSLR, high quality, masterpiece, ",
138
+ "Anime/Illustration": "anime style, high quality illustration, vibrant colors, detailed, masterpiece, best quality, ",
139
+ "Artistic Illustration": "artistic illustration, painterly, detailed artwork, high quality, professional illustration, ",
140
+ "Comic Book": "comic book style, bold lines, dynamic composition, pop art, high quality, ",
141
+ "Watercolor": "watercolor painting, soft brush strokes, artistic, traditional art, masterpiece, ",
142
+ "Cinematic": "cinematic lighting, dramatic atmosphere, film grain, professional color grading, high quality, ",
143
  }
144
 
145
+ # 4. Optional LoRA configuration - user-selectable
146
  OPTIONAL_LORAS = {
147
  "None": {
148
  "repo_id": None,
149
  "weight": 0.0,
150
  "trigger_words": "",
151
+ "description": "No additional LoRA"
152
  },
153
+ "Offset Noise": {
154
  "repo_id": "stabilityai/stable-diffusion-xl-offset-example-lora",
155
  "weight": 0.7,
156
  "trigger_words": "high contrast, dramatic lighting",
157
+ "description": "Enhanced contrast and lighting"
158
  },
159
  "LCM LoRA": {
160
  "repo_id": "latent-consistency/lcm-lora-sdxl",
161
  "weight": 0.8,
162
+ "trigger_words": "high quality",
163
+ "description": "Fast generation mode"
164
  },
165
+ "Pixel Art": {
166
  "repo_id": "nerijs/pixel-art-xl",
167
  "weight": 0.9,
168
+ "trigger_words": "pixel art style, 8bit, retro",
169
+ "description": "Pixel art style"
170
  },
171
+ "Watercolor": {
172
  "repo_id": "ostris/watercolor-style-lora-sdxl",
173
  "weight": 0.8,
174
+ "trigger_words": "watercolor painting, soft colors",
175
+ "description": "Watercolor painting style"
176
  },
177
+ "Sketch": {
178
  "repo_id": "ostris/crayon-style-lora-sdxl",
179
  "weight": 0.7,
180
+ "trigger_words": "sketch style, pencil drawing",
181
+ "description": "Sketch style"
182
  },
183
+ "Portrait": {
184
  "repo_id": "ostris/face-helper-sdxl-lora",
185
  "weight": 0.8,
186
  "trigger_words": "portrait, beautiful face, detailed eyes",
187
+ "description": "Portrait and face enhancement"
188
  }
189
  }
190
 
191
+ # Default parameters
192
  DEFAULT_SEED = -1
193
  DEFAULT_WIDTH = 1024
194
  DEFAULT_HEIGHT = 1024
195
  DEFAULT_LORA_SCALE = 0.8
196
+ DEFAULT_STEPS = 28
197
+ DEFAULT_CFG = 7.0
198
 
199
+ # Supported languages
200
  SUPPORTED_LANGUAGES = {
201
  "en": "English",
202
  "zh": "中文",
 
205
  }
206
 
207
  # ======================
208
+ # Global variables: lazy loading
209
  # ======================
210
  pipe = None
211
+ current_model_key = None
212
  current_loras = {}
213
  device = "cuda" if torch.cuda.is_available() else "cpu"
214
 
215
+ def load_pipeline(model_key: str = None):
216
+ """Flexibly load a pipeline; supports switching between models"""
217
+ global pipe, current_model_key
218
+
219
+ if model_key is None:
220
+ model_key = DEFAULT_MODEL_KEY
221
+
222
+ # If the same model is already loaded, return it directly
223
+ if pipe is not None and current_model_key == model_key:
224
+ return pipe
225
+
226
+ # Unload the previous model
227
+ if pipe is not None:
228
+ unload_pipeline()
229
+
230
+ model_config = MODEL_CONFIGS.get(model_key)
231
+ if not model_config:
232
+ raise ValueError(f"Unknown model configuration: {model_key}")
233
+
234
+ print(f"🚀 Loading model: {model_config['description']} ({model_config['repo_id']})")
235
+
236
+ try:
237
+ # Load SDXL-type models
238
+ if model_config["type"] == "sdxl":
239
+ pipe = StableDiffusionXLPipeline.from_pretrained(
240
+ model_config["repo_id"],
241
+ torch_dtype=torch.float16,
242
+ use_safetensors=True,
243
+ variant="fp16",
244
+ safety_checker=None if not model_config["requires_safety_checker"] else "default"
245
+ ).to(device)
246
 
247
+ # Memory optimizations
248
+ pipe.enable_attention_slicing()
249
+ pipe.enable_vae_slicing()
250
+ if hasattr(pipe, 'enable_model_cpu_offload'):
251
+ pipe.enable_model_cpu_offload()
252
+ if hasattr(pipe, 'enable_xformers_memory_efficient_attention'):
253
+ try:
 
254
  pipe.enable_xformers_memory_efficient_attention()
255
+ except:
256
+ print("⚠️ xformers unavailable, skipping")
257
+
258
+ current_model_key = model_key
259
+ print(f"✅ Successfully loaded model: {model_config['description']}")
260
+ return pipe
261
+ else:
262
+ raise ValueError(f"Unsupported model type: {model_config['type']}")
 
 
 
263
 
264
+ except Exception as e:
265
+ print(f"❌ Failed to load model: {e}")
266
+ # Try to load the fallback model
267
+ if model_key != "sdxl_base":
268
+ print("🔄 Trying fallback model...")
269
+ return load_pipeline("sdxl_base")
270
+ else:
271
+ raise Exception("Failed to load any model")
272
 
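load_pipeline() only discovers an unreachable repo_id at download time, inside the try/except fallback. An optional pre-flight check, not part of this commit, could report broken entries at startup; a sketch assuming huggingface_hub (already pulled in by diffusers) is available:

from huggingface_hub import model_info

def check_model_configs():
    """Optional sanity check: report which configured repos resolve on the Hub."""
    for key, cfg in MODEL_CONFIGS.items():
        try:
            model_info(cfg["repo_id"])
            print(f"✅ {key}: {cfg['repo_id']} is reachable")
        except Exception as exc:
            print(f"⚠️ {key}: {cfg['repo_id']} could not be resolved ({exc})")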
273
  def unload_pipeline():
274
+ """Unload the pipeline to free memory"""
275
+ global pipe, current_loras, current_model_key
276
  if pipe is not None:
 
277
  try:
278
  pipe.unload_lora_weights()
279
  except:
 
282
  torch.cuda.empty_cache()
283
  pipe = None
284
  current_loras = {}
285
+ current_model_key = None
286
+ print("🗑️ Pipeline unloaded")
287
 
288
  def load_lora_weights(lora_configs: List[Dict]):
289
+ """Load multiple LoRA weights, with error handling"""
290
  global pipe, current_loras
291
 
292
  if not lora_configs:
293
  return
294
 
295
+ # Unload existing LoRAs
296
  new_lora_ids = [config['repo_id'] for config in lora_configs if config['repo_id']]
297
  if set(current_loras.keys()) != set(new_lora_ids):
298
  try:
 
301
  except:
302
  pass
303
 
304
+ # Load new LoRAs
305
  adapter_names = []
306
  adapter_weights = []
307
 
308
  for config in lora_configs:
309
  if config['repo_id'] and config['repo_id'] not in current_loras:
310
  try:
 
311
  adapter_name = config['name'].replace(' ', '_').lower()
 
 
312
  pipe.load_lora_weights(
313
  config['repo_id'],
314
  adapter_name=adapter_name
315
  )
316
  current_loras[config['repo_id']] = adapter_name
317
+ print(f"✅ Loaded LoRA: {config['name']}")
 
318
  except Exception as e:
319
+ print(f"⚠️ Failed to load LoRA {config['name']}: {e}")
 
320
  continue
321
 
 
322
  if config['repo_id'] in current_loras:
323
  adapter_names.append(current_loras[config['repo_id']])
324
  adapter_weights.append(config['weight'])
325
 
326
+ # Set adapter weights
327
  if adapter_names:
328
  try:
329
  pipe.set_adapters(adapter_names, adapter_weights=adapter_weights)
330
+ print(f"✅ Activated {len(adapter_names)} LoRA adapter(s)")
331
  except Exception as e:
332
+ print(f"⚠️ Warning while setting adapter weights: {e}")
 
333
  try:
334
  pipe.set_adapters(adapter_names)
335
  except:
336
+ print("❌ Failed to set any adapters")
337
 
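One caveat for the optional LCM LoRA above: loading the adapter alone is usually not enough. In diffusers it is normally paired with LCMScheduler and a much smaller step and guidance budget; a hedged sketch of what selecting it would typically involve (not wired into this commit, enable_lcm_mode is a hypothetical helper):

from diffusers import LCMScheduler

def enable_lcm_mode(pipe):
    # The LCM LoRA is designed for roughly 4-8 steps with guidance_scale around 1.0-2.0.
    pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# Hypothetical usage after load_lora_weights() has activated the LCM adapter:
# enable_lcm_mode(pipe)
# image = pipe(prompt, num_inference_steps=6, guidance_scale=1.5).images[0]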
338
  def process_long_prompt(prompt: str, max_length: int = 77) -> str:
339
+ """Process overly long prompts"""
340
  if len(prompt.split()) <= max_length:
341
  return prompt
342
 
 
343
  sentences = re.split(r'[.!?]+', prompt)
344
  sentences = [s.strip() for s in sentences if s.strip()]
345
 
 
346
  if sentences:
347
  result = sentences[0]
348
  remaining = max_length - len(result.split())
 
353
  result += ". " + sentence
354
  remaining -= len(words)
355
  else:
 
356
  important_words = [w for w in words if len(w) > 3][:remaining]
357
  if important_words:
358
  result += ". " + " ".join(important_words)
 
363
  return " ".join(prompt.split()[:max_length])
364
 
365
  # ======================
366
+ # Main generation function
367
  # ======================
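The conditional decorator on generate_image below relies on PEP 614 (Python 3.9+) allowing arbitrary expressions as decorators. An equivalent, more conventional spelling, shown only as a sketch, would be a small wrapper applied as @maybe_gpu:

def maybe_gpu(func):
    """Apply the ZeroGPU decorator only when the `spaces` package imported successfully."""
    if SPACES_AVAILABLE:
        return spaces.GPU(duration=60)(func)
    return func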
368
  @spaces.GPU(duration=60) if SPACES_AVAILABLE else lambda x: x
369
  def generate_image(
370
+ model_key: str,
371
  prompt: str,
372
  negative_prompt: str,
373
  style: str,
 
378
  lora_scale: float,
379
  steps: int,
380
  cfg_scale: float,
381
+ use_fixed_loras: bool,
382
  language: str = "en"
383
  ):
384
+ """Main image generation function, with ZeroGPU optimization"""
385
  global pipe
386
 
387
  try:
388
+ # Load the selected model
389
+ pipe = load_pipeline(model_key)
390
+ model_config = MODEL_CONFIGS[model_key]
391
 
392
+ # Handle the seed
393
  if seed == -1:
394
  seed = torch.randint(0, 2**32, (1,)).item()
395
  generator = torch.Generator(device=device).manual_seed(seed)
396
 
397
+ # Process the prompts
398
  style_prefix = STYLE_PROMPTS.get(style, "")
399
  processed_prompt = process_long_prompt(style_prefix + prompt, max_length=150)
400
+
401
+ # Use the model's default negative prompt if the user left it empty
402
+ if not negative_prompt.strip():
403
+ negative_prompt = model_config["default_negative"]
404
  processed_negative = process_long_prompt(negative_prompt, max_length=100)
405
 
406
+ # Prepare LoRA configurations
407
  lora_configs = []
408
  active_trigger_words = []
409
 
410
+ # Add fixed LoRAs (if enabled)
411
+ if use_fixed_loras:
412
+ for name, config in FIXED_LORAS.items():
413
+ if config["repo_id"] and config["enabled"]:
414
+ lora_configs.append({
415
+ 'name': name,
416
+ 'repo_id': config["repo_id"],
417
+ 'weight': config["weight"]
418
+ })
419
+ if config["trigger_words"]:
420
+ active_trigger_words.append(config["trigger_words"])
421
 
422
+ # Add user-selected LoRAs
423
  for lora_name in selected_loras:
424
  if lora_name != "None" and lora_name in OPTIONAL_LORAS:
425
  config = OPTIONAL_LORAS[lora_name]
 
432
  if config["trigger_words"]:
433
  active_trigger_words.append(config["trigger_words"])
434
 
435
+ # Load LoRAs
436
  load_lora_weights(lora_configs)
437
 
438
+ # Combine trigger words
439
  if active_trigger_words:
440
  trigger_text = ", ".join(active_trigger_words)
441
  final_prompt = f"{processed_prompt}, {trigger_text}"
442
  else:
443
  final_prompt = processed_prompt
444
 
445
+ # Generate the image
446
  with torch.autocast(device):
447
  image = pipe(
448
  prompt=final_prompt,
 
454
  generator=generator,
455
  ).images[0]
456
 
457
+ # Build metadata
458
  timestamp = datetime.datetime.now()
459
  metadata = {
460
+ "model": model_config["description"],
461
+ "model_repo": model_config["repo_id"],
462
  "prompt": final_prompt,
463
  "original_prompt": prompt,
464
  "negative_prompt": processed_negative,
 
465
  "style": style,
466
+ "fixed_loras_enabled": use_fixed_loras,
467
+ "fixed_loras": [name for name, config in FIXED_LORAS.items() if config["enabled"]] if use_fixed_loras else [],
468
  "selected_loras": [name for name in selected_loras if name != "None"],
469
  "lora_scale": lora_scale,
470
  "seed": seed,
 
477
  "trigger_words": active_trigger_words
478
  }
479
 
480
  metadata_str = json.dumps(metadata, indent=2, ensure_ascii=False)
481
 
482
  return (
483
  image,
484
+ metadata_str,
485
+ f"✅ Generation succeeded! Seed: {seed}"
486
  )
487
 
488
  except Exception as e:
489
+ error_msg = f"Generation failed: {str(e)}"
490
  print(f"❌ {error_msg}")
491
+ return None, error_msg, error_msg
492
 
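The earlier version wrote the WebP and a JSON sidecar; this version only returns metadata_str to the UI. If on-disk copies are still wanted, a helper along these lines could be called before the return (a sketch only; save_outputs is hypothetical and reuses the old seed-timestamp filename convention):

import os
import tempfile

def save_outputs(image, metadata_str, seed, timestamp):
    """Sketch: persist the image as WebP plus a JSON sidecar, as the previous version did."""
    out_dir = tempfile.mkdtemp()
    base = f"{seed}-{timestamp.strftime('%y%m%d%H%M')}"
    img_path = os.path.join(out_dir, f"{base}.webp")
    meta_path = os.path.join(out_dir, f"{base}.json")
    image.save(img_path, format="WEBP", quality=95, method=6)
    with open(meta_path, "w", encoding="utf-8") as f:
        f.write(metadata_str)
    return img_path, meta_path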
493
  # ======================
494
+ # Gradio interface
495
  # ======================
496
  def create_interface():
497
+ """Create the Gradio interface"""
498
 
499
  with gr.Blocks(
500
  theme=gr.themes.Soft(
501
+ primary_hue="blue",
502
+ secondary_hue="purple",
503
  neutral_hue="slate",
 
 
 
 
 
 
504
  ),
505
  css="""
506
+ .model-card {
507
+ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
508
+ padding: 20px;
509
+ border-radius: 12px;
510
+ color: white;
511
+ margin-bottom: 20px;
512
  }
513
+ .control-section {
514
  background: rgba(255,255,255,0.05);
515
+ border-radius: 12px;
516
+ padding: 15px;
517
+ margin: 10px 0;
518
  }
519
  """,
520
+ title="AI Image Generator - Illustrious XL Multi-Model"
521
  ) as demo:
522
 
523
  gr.Markdown("""
524
+ # 🎨 AI Image Generator - Illustrious XL Multi-Model
525
+ ### Freely switch between multiple SDXL models | Flexible LoRA combinations | Tuned parameter presets
526
+ """, elem_classes=["model-card"])
527
 
528
  with gr.Row():
529
+ # Left side - control panel
530
+ with gr.Column(scale=3):
531
 
532
+ # Model selection
533
+ with gr.Group(elem_classes=["control-section"]):
534
+ gr.Markdown("### 📦 Model Selection")
535
+ model_dropdown = gr.Dropdown(
536
+ choices=[(config["description"], key) for key, config in MODEL_CONFIGS.items()],
537
+ value=DEFAULT_MODEL_KEY,
538
+ label="Base Model",
539
+ info="Choose a different model for a different style"
540
+ )
541
+ model_info = gr.Markdown(MODEL_CONFIGS[DEFAULT_MODEL_KEY]["description"])
542
 
543
+ # Prompt input
544
+ with gr.Group(elem_classes=["control-section"]):
545
+ gr.Markdown("### ✍️ Prompts")
546
+ prompt_input = gr.Textbox(
547
+ label="Positive Prompt",
548
+ placeholder="Describe the image you want to generate...",
549
+ lines=4,
550
+ max_lines=20
551
+ )
552
+
553
+ negative_prompt_input = gr.Textbox(
554
+ label="Negative Prompt (leave empty to use the model default)",
555
+ placeholder="The selected model's recommended negative prompt will be used automatically...",
556
+ lines=3,
557
+ max_lines=15
558
+ )
559
+
560
+ style_radio = gr.Radio(
561
+ choices=list(STYLE_PROMPTS.keys()),
562
+ label="Style Template",
563
+ value="None",
564
+ info="Automatically prepended to your prompt"
565
+ )
566
 
567
+ # Basic parameters
568
+ with gr.Group(elem_classes=["control-section"]):
569
+ gr.Markdown("### ⚙️ Basic Parameters")
570
+
571
+ with gr.Row():
572
  seed_input = gr.Slider(
573
  minimum=-1,
574
  maximum=99999999,
575
  step=1,
576
  value=DEFAULT_SEED,
577
+ label="Seed (-1 = random)"
578
  )
579
+
580
+ with gr.Row():
 
 
 
581
  width_input = gr.Slider(
582
  minimum=512,
583
  maximum=1536,
584
  step=64,
585
  value=DEFAULT_WIDTH,
586
+ label="Width"
587
  )
 
 
 
 
588
  height_input = gr.Slider(
589
  minimum=512,
590
  maximum=1536,
591
  step=64,
592
  value=DEFAULT_HEIGHT,
593
+ label="Height"
594
+ )
595
+
596
+ with gr.Row():
597
+ steps_slider = gr.Slider(
598
+ minimum=10,
599
+ maximum=100,
600
+ step=1,
601
+ value=DEFAULT_STEPS,
602
+ label="Sampling Steps"
603
+ )
604
+ cfg_slider = gr.Slider(
605
+ minimum=1.0,
606
+ maximum=20.0,
607
+ step=0.5,
608
+ value=DEFAULT_CFG,
609
+ label="CFG Scale"
610
  )
 
611
 
612
+ # LoRA configuration
613
+ with gr.Group(elem_classes=["control-section"]):
614
+ gr.Markdown("### 🎭 LoRA Configuration")
615
+
616
+ use_fixed_loras = gr.Checkbox(
617
+ label="Enable fixed LoRA enhancement (quality + detail)",
618
+ value=True,
619
+ info="Automatically loads the quality and detail enhancement LoRAs"
620
+ )
621
+
622
+ lora_dropdown = gr.Dropdown(
623
+ choices=list(OPTIONAL_LORAS.keys()),
624
+ label="Additional LoRAs (multi-select)",
625
+ value=["None"],
626
+ multiselect=True,
627
+ info="Select additional style LoRAs"
628
+ )
629
+
630
  lora_scale_slider = gr.Slider(
631
  minimum=0.0,
632
  maximum=1.5,
633
  step=0.05,
634
  value=DEFAULT_LORA_SCALE,
635
+ label="LoRA Strength"
636
  )
 
637
 
638
+ # Generate button
639
  generate_btn = gr.Button(
640
+ "✨ Generate Image",
641
  variant="primary",
642
+ size="lg"
643
+ )
644
+
645
+ status_text = gr.Textbox(
646
+ label="Status",
647
+ value="Ready",
648
+ interactive=False
649
  )
650
 
651
+ # Right side - output
652
  with gr.Column(scale=2):
 
653
  image_output = gr.Image(
654
+ label="Generated Image",
655
+ height=600,
656
  format="webp"
657
  )
658
 
659
+ gr.Markdown("**Right-click the image to download**")
 
 
660
 
 
661
  metadata_output = gr.Textbox(
662
+ label="Generation Metadata (JSON)",
663
  lines=15,
664
+ max_lines=25
 
665
  )
666
 
667
  # ======================
668
+ # Event handlers
669
  # ======================
670
 
671
+ # Update the info panel when the model is switched
672
+ def update_model_info(model_key):
673
+ config = MODEL_CONFIGS[model_key]
674
+ info = f"""
675
+ **Model:** {config['description']}
676
+ **Repo:** `{config['repo_id']}`
677
+ **Recommended settings:** steps={config['optimal_settings']['steps']}, CFG={config['optimal_settings']['cfg']}
678
+ """
679
+ return (
680
+ info,
681
+ config['optimal_settings']['steps'],
682
+ config['optimal_settings']['cfg'],
683
+ config['default_negative']
684
+ )
685
 
686
+ model_dropdown.change(
687
+ fn=update_model_info,
688
+ inputs=[model_dropdown],
689
+ outputs=[model_info, steps_slider, cfg_slider, negative_prompt_input]
690
+ )
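The change handler only fires on user interaction, so the sliders keep the global defaults until the first model switch. If the panel should reflect DEFAULT_MODEL_KEY immediately, the same callback can be attached to the page-load event; a sketch, assuming Gradio's Blocks.load event:

# Sketch: initialise the info panel and sliders from the default model when the page loads.
demo.load(
    fn=update_model_info,
    inputs=[model_dropdown],
    outputs=[model_info, steps_slider, cfg_slider, negative_prompt_input],
)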
691
 
692
+ # Generate button
693
  generate_btn.click(
694
  fn=generate_image,
695
  inputs=[
696
+ model_dropdown, prompt_input, negative_prompt_input, style_radio,
697
  seed_input, width_input, height_input,
698
  lora_dropdown, lora_scale_slider,
699
+ steps_slider, cfg_slider, use_fixed_loras,
700
+ gr.Textbox(value="zh", visible=False)
701
  ],
702
  outputs=[
703
+ image_output, metadata_output, status_text
704
  ]
705
  )
706
 
707
  return demo
708
 
709
  # ======================
710
+ # Launch application
711
  # ======================
712
  if __name__ == "__main__":
713
  demo = create_interface()