Lasya18 committed on
Commit eb62c88 · verified · 1 Parent(s): 8cbf01a

Update app.py

Files changed (1):
  1. app.py +547 -492

app.py CHANGED
@@ -1,570 +1,625 @@
- import gradio_client.utils
-
- # Store the original function
- original_get_type = gradio_client.utils.get_type
-
- def patched_get_type(schema):
-     """Patched version that handles boolean schemas properly"""
-     # If schema is a boolean, return appropriate type
-     if isinstance(schema, bool):
-         return "bool"
-
-     # If schema is not a dict, return a default or handle appropriately
-     if not isinstance(schema, dict):
-         return "any"
-
-     # Call original function for dict schemas
-     return original_get_type(schema)
-
- # Apply the patch
- gradio_client.utils.get_type = patched_get_type
-
- # Now import and use gradio normally
-
  import gradio as gr
  import torch
- from PIL import Image, ImageDraw, ImageFilter
- import numpy as np
- from transformers import (
-     DetrImageProcessor,
-     DetrForObjectDetection,
-     BlipProcessor,
-     BlipForConditionalGeneration,
-     pipeline
- )
- from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, StableDiffusionInpaintPipeline
  import cv2
- import os
- from typing import List, Tuple
  import warnings

  warnings.filterwarnings("ignore")

- # Force CPU usage for free tier
- device = "cpu"
- torch.set_num_threads(2)
-
- # Global variables for models
- detector_processor = None
- detector_model = None
- caption_processor = None
- caption_model = None
- controlnet_pipe = None
- inpaint_pipe = None
-
- def load_models():
-     """Load models optimized for interior design room transformation"""
-     global detector_processor, detector_model
-     global caption_processor, caption_model, controlnet_pipe, inpaint_pipe
-
-     try:
-         print("Loading furniture detection model...")
-         try:
-             detector_processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
-             detector_model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")
-             detector_model.eval()
-             print("✅ DETR model loaded successfully")
-         except Exception as detr_error:
-             print(f"⚠️ DETR failed: {detr_error}")
-             detector_processor = None
-             detector_model = None
-
-         print("Loading image captioning model...")
-         caption_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
-         caption_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
-         caption_model.eval()
-         print("BLIP model loaded successfully")
-
-         print("Loading ControlNet for room layout preservation...")
-         controlnet = ControlNetModel.from_pretrained(
-             "lllyasviel/sd-controlnet-canny",
-             torch_dtype=torch.float32
-         )
-
-         controlnet_pipe = StableDiffusionControlNetPipeline.from_pretrained(
-             "runwayml/stable-diffusion-v1-5",
-             controlnet=controlnet,
-             torch_dtype=torch.float32,
-             safety_checker=None,
-             requires_safety_checker=False
-         )
-
-         print("Applying CPU optimizations to ControlNet pipeline...")
-         controlnet_pipe.enable_attention_slicing()
-
-         try:
-             import accelerate
-             if torch.cuda.is_available():
-                 controlnet_pipe.enable_sequential_cpu_offload()
-                 print("✅ ControlNet: Sequential CPU offload enabled")
-             else:
-                 try:
-                     controlnet_pipe.enable_model_cpu_offload()
-                     print("✅ ControlNet: Model CPU offload enabled")
-                 except:
-                     print("✅ ControlNet: Running without CPU offload (CPU-only mode)")
-         except ImportError:
-             print("⚠️ Accelerate not available, running without CPU offload")
-         except Exception as e:
-             print(f"⚠️ CPU offload setup failed: {e}")
-
-         print("Loading inpainting pipeline for furniture replacement...")
-         inpaint_pipe = StableDiffusionInpaintPipeline.from_pretrained(
-             "runwayml/stable-diffusion-v1-5",
-             torch_dtype=torch.float32,
-             safety_checker=None,
-             requires_safety_checker=False
-         )
-
-         print("Applying CPU optimizations to inpainting pipeline...")
-         inpaint_pipe.enable_attention_slicing()

          try:
-             import accelerate
-             if torch.cuda.is_available():
-                 inpaint_pipe.enable_sequential_cpu_offload()
-                 print("✅ Inpainting: Sequential CPU offload enabled")
-             else:
-                 try:
-                     inpaint_pipe.enable_model_cpu_offload()
-                     print("✅ Inpainting: Model CPU offload enabled")
-                 except:
-                     print("✅ Inpainting: Running without CPU offload (CPU-only mode)")
-         except ImportError:
-             print("⚠️ Accelerate not available for inpainting, running without CPU offload")
-         except Exception as e:
-             print(f"⚠️ Inpainting CPU offload setup failed: {e}")
-
-         print("🎉 All models loaded successfully!")
-         return True
-
-     except Exception as e:
-         print(f"❌ Critical error loading models: {e}")
-         import traceback
-         print(f"Full traceback: {traceback.format_exc()}")
-         return False
-
- def detect_furniture_and_style(inspiration_image: Image.Image) -> Tuple[List[dict], str, str]:
-     """Analyze inspiration image for furniture and style"""
-     try:
-         # Detect furniture items
-         furniture_items = []
-         if detector_model is not None and detector_processor is not None:
-             if max(inspiration_image.size) > 800:
-                 resized_img = inspiration_image.resize((800, int(800 * inspiration_image.size[1] / inspiration_image.size[0])))
-             else:
-                 resized_img = inspiration_image
-
-             inputs = detector_processor(images=resized_img, return_tensors="pt")
-
-             with torch.no_grad():
-                 outputs = detector_model(**inputs)
-
-             target_sizes = torch.tensor([resized_img.size[::-1]])
-             results = detector_processor.post_process_object_detection(
-                 outputs, target_sizes=target_sizes, threshold=0.6
-             )
-
-             furniture_keywords = ['chair', 'couch', 'bed', 'dining table', 'tv',
-                                   'sofa', 'table', 'desk', 'cabinet', 'refrigerator', 'oven']
-
-             for score, label, box in zip(results[0]["scores"], results[0]["labels"], results[0]["boxes"]):
-                 label_name = detector_model.config.id2label[label.item()]
-                 if any(keyword in label_name.lower() for keyword in furniture_keywords):
-                     furniture_items.append({
-                         'label': label_name,
-                         'confidence': score.item(),
-                         'box': box.tolist()
-                     })
-
-         # Analyze style using caption
-         if max(inspiration_image.size) > 512:
-             caption_img = inspiration_image.resize((512, int(512 * inspiration_image.size[1] / inspiration_image.size[0])))
-         else:
-             caption_img = inspiration_image
-
-         inputs = caption_processor(caption_img, return_tensors="pt")
-         with torch.no_grad():
-             out = caption_model.generate(**inputs, max_length=30, num_beams=2)
-
-         caption = caption_processor.decode(out[0], skip_special_tokens=True)
-
-         # Style detection
-         style_keywords = {
-             'modern': ['modern', 'contemporary', 'sleek', 'minimal', 'glass', 'steel', 'clean'],
-             'traditional': ['traditional', 'classic', 'wood', 'ornate', 'vintage', 'antique'],
-             'minimalist': ['minimal', 'simple', 'clean', 'white', 'empty', 'sparse'],
-             'industrial': ['industrial', 'metal', 'concrete', 'exposed', 'brick'],
-             'scandinavian': ['light', 'bright', 'wooden', 'cozy', 'natural'],
-             'rustic': ['rustic', 'rural', 'country', 'farmhouse']
-         }
-
-         caption_lower = caption.lower()
-         style_scores = {}
-
-         for style, keywords in style_keywords.items():
-             score = sum(2 if keyword in caption_lower else 0 for keyword in keywords)
-             if style in caption_lower:
-                 score += 5
-             style_scores[style] = score
-
-         detected_style = max(style_scores, key=style_scores.get) if max(style_scores.values()) > 0 else 'modern'
-
-         return furniture_items, detected_style, caption
-
-     except Exception as e:
-         print(f"Error analyzing inspiration image: {e}")
-         return [], 'modern', 'A furnished interior space'
-
- def create_canny_control(user_room_image: Image.Image) -> Image.Image:
-     """Create Canny edge detection for ControlNet to preserve room structure"""
-     try:
-         # Convert PIL to numpy
-         image_np = np.array(user_room_image)
-
-         # Convert to grayscale
-         gray = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)
-
-         # Apply Gaussian blur to reduce noise
-         blurred = cv2.GaussianBlur(gray, (5, 5), 0)
-
-         # Apply Canny edge detection with adjusted parameters for room layouts
-         canny = cv2.Canny(blurred, 50, 150)
-
-         # Convert back to PIL
-         canny_image = Image.fromarray(canny)
-
-         return canny_image
-
-     except Exception as e:
-         print(f"Error creating canny control: {e}")
-         return user_room_image.convert('L')
-
- def transform_room_with_inspiration(
-     inspiration_image: Image.Image,
-     user_room_image: Image.Image,
-     style_override: str = "Auto-detect",
-     transformation_strength: float = 0.8,
-     quality: str = "Balanced"
- ) -> Tuple[Image.Image, str]:
-     """Main function to transform user's room with inspiration style while preserving structure"""
-
-     if inspiration_image is None or user_room_image is None:
-         return None, "❌ Please upload both images:\n1. Inspiration image (interior design you like)\n2. Your room image (to be transformed)"
-
-     try:
-         print("🎯 STEP 1: Analyzing inspiration image for style and furniture...")
-
-         # Step 1: Analyze inspiration image to extract style and furniture elements
-         furniture_items, detected_style, scene_description = detect_furniture_and_style(inspiration_image)
-
-         # Use style override if provided, otherwise use detected style
-         final_style = style_override if style_override != "Auto-detect" else detected_style
-
-         print(f"📊 Detected style: {detected_style}, Using: {final_style}")
-         print(f"🪑 Found {len(furniture_items)} furniture items")
-
-         print("🎯 STEP 2: Analyzing user room structure for preservation...")
-
-         # Step 2: Create ControlNet control image to preserve room structure
-         canny_control = create_canny_control(user_room_image)
-
-         print("🎯 STEP 3: Building transformation prompt from inspiration...")
-
-         # Step 3: Build detailed prompt based on inspiration analysis
-         furniture_list = [item['label'] for item in furniture_items]
-         furniture_str = ", ".join(set(furniture_list)) if furniture_list else "modern furniture"
-
-         # Style-specific prompt generation
-         style_descriptions = {
-             'modern': "modern interior design, contemporary furniture, clean lines, sleek finishes, neutral colors, minimalist aesthetic",
-             'traditional': "traditional interior design, classic furniture, warm wood tones, elegant details, ornate elements",
-             'minimalist': "minimalist interior design, simple clean furniture, neutral palette, uncluttered space, zen aesthetic",
-             'industrial': "industrial interior design, metal fixtures, exposed elements, urban aesthetic, concrete and steel",
-             'scandinavian': "scandinavian interior design, light wood furniture, cozy textiles, natural light, hygge atmosphere",
-             'rustic': "rustic interior design, wooden furniture, country style, natural textures, farmhouse charm"
-         }
-
-         style_prompt = style_descriptions.get(final_style.lower(), style_descriptions['modern'])
-
-         # Create comprehensive prompt that incorporates inspiration elements
-         if furniture_list:
-             main_prompt = f"{style_prompt}, featuring {furniture_str}, well-arranged interior space, professional photography, beautiful lighting, high quality interior design"
-         else:
-             main_prompt = f"{style_prompt}, beautifully furnished room, professional interior photography, elegant arrangement, high quality"
-
-         negative_prompt = "blurry, low quality, distorted, cluttered, dark, bad architecture, deformed furniture, amateur photography, poor lighting"
-
-         print("🎯 STEP 4: Generating transformed room while preserving structure...")
-         print("⏱️ This will take 3-5 minutes on CPU...")
-
-         # Step 4: Generate transformation using ControlNet to preserve room structure
-         inference_steps = {"Fast": 12, "Balanced": 18, "High Quality": 25}
-         steps = inference_steps.get(quality, 18)
-
-         # Resize images to a fixed size for consistent processing
-         target_size = (512, 512)
-         user_room_resized = user_room_image.resize(target_size)
-         canny_control_resized = canny_control.resize(target_size)
-
-         # Generate the transformed image using ControlNet
-         with torch.no_grad():
-             generated_image = controlnet_pipe(
-                 prompt=main_prompt,
-                 negative_prompt=negative_prompt,
-                 image=canny_control_resized,  # This preserves the room structure
-                 num_inference_steps=steps,
-                 guidance_scale=7.5,
-                 controlnet_conditioning_scale=transformation_strength,  # How much to preserve vs transform
-                 height=target_size[1],
-                 width=target_size[0],
-                 generator=torch.manual_seed(42)  # For reproducible results
-             ).images[0]
-
-         # Resize result back to match original user room dimensions
-         final_size = user_room_image.size
-         generated_image = generated_image.resize(final_size, Image.LANCZOS)
-
-         print("🎯 STEP 5: Creating transformation analysis report...")
-
-         # Step 5: Create detailed analysis report
-         furniture_report = ""
-         if furniture_items:
-             furniture_report = "\n".join([f"• {item['label']} (confidence: {item['confidence']:.2f})" for item in furniture_items[:8]])
-         else:
-             furniture_report = "• General interior styling elements detected"
-
-         analysis_report = f"""**🎨 ROOM TRANSFORMATION COMPLETE**
-
- **📸 INSPIRATION ANALYSIS:**
- • **Original Style Detected:** {detected_style.title()}
- • **Applied Style:** {final_style.title()}
- • **Scene Description:** {scene_description}
-
- **🪑 FURNITURE & ELEMENTS FROM INSPIRATION:**
- {furniture_report}
-
- **🏠 TRANSFORMATION DETAILS:**
- • **Structure Preservation:** ✅ Room dimensions and layout preserved using ControlNet
- • **Style Transfer:** ✅ {final_style.title()} aesthetic applied from inspiration
- • **Transformation Strength:** {transformation_strength:.1%} (higher = more dramatic change)
- • **Quality Setting:** {quality} ({steps} generation steps)
- • **Processing Method:** Canny edge detection + Stable Diffusion ControlNet
-
- **🎯 WHAT WAS PRESERVED:**
- • Room walls and architectural structure
- • Window/door positions and proportions
- • Overall room dimensions and layout
- • Spatial relationships and perspective
-
- **✨ WHAT WAS TRANSFORMED:**
- • Interior design style and aesthetic
- • Furniture arrangement and selection
- • Color scheme and lighting mood
- • Decorative elements and textures
-
- **💡 Tips for Better Results:**
- • Try different transformation strengths (0.3-1.0)
- • Use clear, well-lit photos of both images
- • Similar room types work best (bedroom to bedroom, etc.)
- • Experiment with style overrides for different aesthetics
- """
-
-         print("✅ TRANSFORMATION COMPLETED SUCCESSFULLY!")
-         return generated_image, analysis_report

      except Exception as e:
-         error_msg = f"""❌ TRANSFORMATION FAILED
-
- **Error:** {str(e)}
-
- **Troubleshooting:**
- • Ensure both images are clear and well-lit
- • Try reducing transformation strength
- • Check that images show interior spaces
- • Try different quality settings
-
- Please try again with different images or settings."""
-
-         print(f"Error in room transformation: {e}")
-         return None, error_msg
-
- def create_room_transformer_interface():
-     """Create the main room transformation interface"""
-
      with gr.Blocks(
-         title="AI Room Transformer - Style Transfer with Structure Preservation",
          css="""
-         .main-container { max-width: 1200px; margin: 0 auto; }
-         .header { text-align: center; padding: 2rem 0; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; margin-bottom: 2rem; border-radius: 10px; }
-         .instruction-box { background: #f8f9fa; padding: 1.5rem; border-radius: 8px; margin: 1rem 0; border-left: 4px solid #007bff; }
-         .settings-panel { background: #e3f2fd; padding: 1rem; border-radius: 8px; margin: 1rem 0; }
-         .result-panel { background: #f1f8e9; padding: 1rem; border-radius: 8px; margin: 1rem 0; }
          """
      ) as demo:

-         with gr.Column(elem_classes="main-container"):
-             # Header
-             gr.HTML("""
-             <div class="header">
-                 <h1>🏠 AI Room Transformer</h1>
-                 <h3>Transform Your Room with Inspiration While Preserving Structure</h3>
-                 <p>Upload an inspiration image and your room image to see the magic happen!</p>
-             </div>
-             """)
-
-             # Instructions
-             gr.HTML("""
-             <div class="instruction-box">
-                 <h3>📋 How It Works:</h3>
-                 <ol>
-                     <li><strong>Inspiration Image:</strong> Upload an interior design photo you love (this provides the style, colors, and furniture ideas)</li>
-                     <li><strong>Your Room Image:</strong> Upload a photo of your room (empty or furnished - the structure will be preserved)</li>
-                     <li><strong>AI Magic:</strong> Our AI analyzes the inspiration style and applies it to your room while keeping walls, doors, and windows in place</li>
-                     <li><strong>Result:</strong> Get a transformed room that keeps your space's dimensions but adopts the inspiration's aesthetic</li>
-                 </ol>
-             </div>
-             """)
-
-             # Main interface
-             with gr.Row():
-                 # Input Section
-                 with gr.Column(scale=1):
-                     gr.Markdown("### 🎨 INPUT IMAGES")
-
-                     inspiration_input = gr.Image(
-                         label="📸 Inspiration Image (Interior Design You Like)",
-                         type="pil",
-                         height=300
-                     )
-                     gr.Markdown("*Upload a beautiful interior design photo for style reference*")
-
-                     user_room_input = gr.Image(
-                         label="🏠 Your Room Image (To Be Transformed)",
-                         type="pil",
-                         height=300
-                     )
-                     gr.Markdown("*Upload your room photo - structure will be preserved*")
-
-                     # Settings Panel
-                     with gr.Column(elem_classes="settings-panel"):
-                         gr.Markdown("### ⚙️ TRANSFORMATION SETTINGS")
-
-                         style_override = gr.Dropdown(
-                             choices=["Auto-detect", "Modern", "Traditional", "Minimalist",
-                                      "Industrial", "Scandinavian", "Rustic"],
-                             label="🎭 Style Override",
-                             value="Auto-detect",
-                             info="Override auto-detected style if desired"
-                         )
-
-                         transformation_strength = gr.Slider(
-                             minimum=0.3,
-                             maximum=1.0,
-                             value=0.8,
-                             step=0.1,
-                             label="💪 Transformation Strength",
-                             info="Higher = more dramatic change, Lower = more preservation"
                          )

-                         quality_setting = gr.Dropdown(
-                             choices=["Fast", "Balanced", "High Quality"],
-                             label="Quality Setting",
-                             value="Balanced",
-                             info="Higher quality takes longer but produces better results"
-                         )
-
-                         transform_btn = gr.Button(
-                             "🎨 TRANSFORM MY ROOM",
-                             variant="primary",
-                             size="lg"
-                         )
-
-                 # Output Section
-                 with gr.Column(scale=1):
-                     gr.Markdown("### ✨ TRANSFORMATION RESULT")
-
-                     with gr.Column(elem_classes="result-panel"):
-                         output_image = gr.Image(
-                             label="🏠 Your Transformed Room",
-                             type="pil",
-                             height=400
-                         )
-
-                         analysis_output = gr.Markdown(
-                             value="**Upload both images and click 'TRANSFORM MY ROOM' to see the magic!**\n\n⏱️ Processing typically takes 3-5 minutes on CPU.",
-                             label="📊 Transformation Analysis"
                          )

-             # Process button click
-             transform_btn.click(
-                 fn=transform_room_with_inspiration,
-                 inputs=[
-                     inspiration_input,
-                     user_room_input,
-                     style_override,
-                     transformation_strength,
-                     quality_setting
-                 ],
-                 outputs=[output_image, analysis_output]
-             )
-
-             # Footer
-             gr.HTML("""
-             <div style="text-align: center; margin-top: 2rem; padding: 1rem; background: #f8f9fa; border-radius: 8px;">
-                 <p><strong>🚀 AI Room Transformer</strong> | Powered by Stable Diffusion + ControlNet</p>
-                 <p>Preserves your room structure while applying inspiration aesthetics</p>
-             </div>
-             """)

      return demo

- # Main execution
  if __name__ == "__main__":
-     print("🏠 Starting AI Room Transformer with Structure Preservation...")
-     print(f"📋 Device: {device}")
-     print(f"🔧 PyTorch: {torch.__version__}")
-
-     try:
-         if load_models():
-             print("✅ All models loaded successfully!")
-             print("🚀 Creating room transformation interface...")
-
-             demo = create_room_transformer_interface()
-
-             print("🌐 Launching application...")
-             print("🎯 Ready to transform rooms while preserving structure!")
-
-             # Launch with proper settings for different environments
-             demo.launch(
-                 share=True,  # Create public link
-                 show_error=True,
-                 debug=False,
-                 server_name="0.0.0.0",
-                 server_port=7860 if os.environ.get('SPACE_ID') else None  # Use 7860 for HF Spaces
-             )
-
-         else:
-             print("❌ Model loading failed!")
-             # Create simple error interface
-             with gr.Blocks() as error_demo:
-                 gr.Markdown("# ❌ AI Room Transformer - Model Loading Error")
-                 gr.Markdown("""
-                 The AI models failed to load properly.
-
-                 **Common causes:**
-                 - Insufficient memory
-                 - Network issues downloading models
-                 - Missing dependencies
-
-                 **Please try:**
-                 1. Refreshing the page
-                 2. Waiting a few minutes for models to download
-                 3. Checking system resources
-                 """)
-
-             error_demo.launch(share=True, server_name="0.0.0.0")
-
-     except Exception as e:
-         print(f"❌ Critical startup error: {e}")
-         import traceback
-         traceback.print_exc()
+ import os

  import gradio as gr
  import torch
  import cv2
+ import numpy as np
+ from PIL import Image, ImageEnhance
  import warnings
+ from typing import Tuple, Optional, Dict
+ import logging
+ import traceback
+
+ # Suppress warnings for cleaner output
  warnings.filterwarnings("ignore")
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+ logger = logging.getLogger(__name__)

+ # Configuration
+ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+ DTYPE = torch.float16 if DEVICE == "cuda" else torch.float32
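+ # (float16 halves GPU memory use; most CPU kernels require float32, hence the split)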
+ MAX_RESOLUTION = 512  # Reduced for better compatibility
+ MIN_RESOLUTION = 256
+
+ # HuggingFace token from environment
+ HF_TOKEN = os.environ.get('HF_TOKEN')
+
+ class InteriorDesignPipeline:
+     def __init__(self):
+         self.device = DEVICE
+         self.dtype = DTYPE
+         self.pipe = None
+         self.controlnet = None
+         self.canny_detector = None
+         self.initialized = False
+         self.mode = "uninitialized"
+         self.error_log = []
+
+     def log_error(self, error_msg):
+         """Log errors for debugging"""
+         self.error_log.append(error_msg)
+         logger.error(error_msg)
+
+     def initialize_pipeline(self):
+         """Initialize pipeline with detailed error logging"""
+         if self.initialized:
+             return True
+
+         logger.info("🔧 Starting Pipeline Initialization...")
+         logger.info(f"📱 Device: {self.device}")
+         logger.info(f"🔢 Data type: {self.dtype}")
+         logger.info(f"💾 CUDA available: {torch.cuda.is_available()}")
+
+         if torch.cuda.is_available():
+             logger.info(f"🎮 CUDA device: {torch.cuda.get_device_name()}")
+             logger.info(f"💾 CUDA memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f}GB")
+
+         logger.info(f"🔑 HF Token available: {'Yes' if HF_TOKEN else 'No'}")

          try:
+             # Step 1: Check package availability
+             logger.info("📦 Checking package availability...")
+             try:
+                 import diffusers
+                 logger.info(f"✓ diffusers version: {diffusers.__version__}")
+             except ImportError as e:
+                 self.log_error(f"❌ diffusers not available: {e}")
+                 self.mode = "fallback"
+                 self.initialized = True
+                 return True
+
+             try:
+                 import transformers
+                 logger.info(f"✓ transformers version: {transformers.__version__}")
+             except ImportError as e:
+                 self.log_error(f"❌ transformers not available: {e}")
+                 self.mode = "fallback"
+                 self.initialized = True
+                 return True
+
+             try:
+                 import controlnet_aux
+                 logger.info("✓ controlnet_aux available")
+             except ImportError as e:
+                 self.log_error(f"❌ controlnet_aux not available: {e}")
+                 self.mode = "fallback"
+                 self.initialized = True
+                 return True
+
+             # Step 2: Import classes
+             logger.info("📚 Importing model classes...")
+             from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel
+             from controlnet_aux import CannyDetector
+
+             # Step 3: Try to load ControlNet (smaller model first)
+             logger.info("🔍 Loading ControlNet model...")
+             try:
+                 # Try with minimal settings first
+                 self.controlnet = ControlNetModel.from_pretrained(
+                     "diffusers/controlnet-canny-sdxl-1.0",
+                     torch_dtype=self.dtype,
+                     token=HF_TOKEN,
+                     cache_dir="./hf_cache",
+                     local_files_only=False,
+                     resume_download=True
+                 )
+                 logger.info("✅ ControlNet loaded successfully")
+             except Exception as e:
+                 self.log_error(f"❌ ControlNet loading failed: {e}")
+                 self.log_error(f"Full traceback: {traceback.format_exc()}")
+                 self.mode = "fallback"
+                 self.initialized = True
+                 return True
+
+             # Step 4: Load main pipeline
+             logger.info("🎨 Loading SDXL pipeline...")
+             try:
+                 self.pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
+                     "stabilityai/stable-diffusion-xl-base-1.0",
+                     controlnet=self.controlnet,
+                     torch_dtype=self.dtype,
+                     token=HF_TOKEN,
+                     cache_dir="./hf_cache",
+                     local_files_only=False,
+                     resume_download=True,
+                     variant="fp16" if self.dtype == torch.float16 else None,
+                     use_safetensors=True
+                 )
+                 logger.info("✅ SDXL pipeline loaded successfully")
+             except Exception as e:
+                 self.log_error(f"❌ SDXL pipeline loading failed: {e}")
+                 self.log_error(f"Full traceback: {traceback.format_exc()}")
+                 self.mode = "fallback"
+                 self.initialized = True
+                 return True
+
+             # Step 5: Move to device
+             logger.info(f"🚀 Moving pipeline to {self.device}...")
+             try:
+                 self.pipe = self.pipe.to(self.device)
+                 logger.info("✅ Pipeline moved to device successfully")
+             except Exception as e:
+                 self.log_error(f"❌ Failed to move pipeline to device: {e}")
+                 self.mode = "fallback"
+                 self.initialized = True
+                 return True
+
+             # Step 6: Apply optimizations
+             logger.info("⚡ Applying memory optimizations...")
+             try:
+                 if self.device == "cuda":
+                     # Only apply optimizations that are likely to work
+                     self.pipe.enable_vae_slicing()
+                     logger.info("✓ VAE slicing enabled")
+
+                     self.pipe.enable_attention_slicing(1)
+                     logger.info("✓ Attention slicing enabled")
+
+                     # Try CPU offload (helps with memory)
+                     try:
+                         self.pipe.enable_model_cpu_offload()
+                         logger.info("✓ Model CPU offload enabled")
+                     except Exception as e:
+                         logger.warning(f"⚠️ CPU offload failed: {e}")
+
+                     # Try xformers (optional)
+                     try:
+                         self.pipe.enable_xformers_memory_efficient_attention()
+                         logger.info("✓ XFormers enabled")
+                     except Exception as e:
+                         logger.info(f"ℹ️ XFormers not available: {e}")
+
+             except Exception as e:
+                 logger.warning(f"⚠️ Some optimizations failed: {e}")
+
+             # Step 7: Initialize Canny detector
+             logger.info("🔍 Initializing Canny detector...")
+             try:
+                 self.canny_detector = CannyDetector()
+                 logger.info("✅ Canny detector initialized")
+             except Exception as e:
+                 self.log_error(f"❌ Canny detector initialization failed: {e}")
+                 # Continue without it - we have an OpenCV fallback
+
+             # Step 8: Test generation (small test)
+             logger.info("🧪 Testing pipeline with small generation...")
+             try:
+                 # Create a small test image
+                 test_image = Image.new('RGB', (512, 512), color='white')
+                 test_control = self.create_canny_control(test_image)
+
+                 generator = torch.Generator(device=self.device).manual_seed(42)
+
+                 with torch.autocast(self.device, enabled=(self.device == "cuda")):
+                     test_result = self.pipe(
+                         prompt="test room",
+                         image=test_control,
+                         num_inference_steps=1,  # Just 1 step for testing
+                         height=512,
+                         width=512,
+                         generator=generator,
+                         controlnet_conditioning_scale=0.5,
+                         guidance_scale=7.5
+                     ).images[0]
+
+                 logger.info("✅ Pipeline test successful!")
+
+                 # Cleanup test
+                 del test_result, test_image, test_control
+                 if self.device == "cuda":
+                     torch.cuda.empty_cache()
+
+             except Exception as e:
+                 self.log_error(f"❌ Pipeline test failed: {e}")
+                 self.log_error(f"Full traceback: {traceback.format_exc()}")
+                 self.mode = "fallback"
+                 self.initialized = True
+                 return True
+
+             self.initialized = True
+             self.mode = "advanced"
+             logger.info("🎉 Advanced AI pipeline fully ready!")
+             return True
+
+         except Exception as e:
+             self.log_error(f"❌ Unexpected error during initialization: {e}")
+             self.log_error(f"Full traceback: {traceback.format_exc()}")
+             self.mode = "fallback"
+             self.initialized = True
+             return True
+
+     def prepare_image(self, image: Image.Image) -> Image.Image:
+         """Prepare image for processing"""
+         if image.mode != 'RGB':
+             image = image.convert('RGB')
+
+         w, h = image.size
+
+         # Calculate new size maintaining aspect ratio
+         if w > h:
+             new_w = MAX_RESOLUTION
+             new_h = int(h * MAX_RESOLUTION / w)
+         else:
+             new_h = MAX_RESOLUTION
+             new_w = int(w * MAX_RESOLUTION / h)
+
+         # Make divisible by 8 for diffusion models
+         new_w = (new_w // 8) * 8
+         new_h = (new_h // 8) * 8
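+         # (the VAE in Stable Diffusion-family models downsamples by a factor
+         # of 8, so both spatial dimensions must be multiples of 8)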
+
+         # Ensure minimum size
+         new_w = max(new_w, MIN_RESOLUTION)
+         new_h = max(new_h, MIN_RESOLUTION)
+
+         return image.resize((new_w, new_h), Image.LANCZOS)
+
+     def create_canny_control(self, image: Image.Image) -> Image.Image:
+         """Create Canny edge control image"""
+         try:
+             if self.canny_detector and self.mode == "advanced":
+                 return self.canny_detector(image, low_threshold=100, high_threshold=200)
+         except Exception as e:
+             logger.debug(f"Canny detector failed: {e}")
+
+         # OpenCV fallback (always works)
+         img_array = np.array(image)
+         gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
+         edges = cv2.Canny(gray, 100, 200)
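+         # (100/200 are Canny's hysteresis thresholds: gradients above 200 become
+         # strong edges; those between 100 and 200 are kept only when connected
+         # to a strong edge)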
+         return Image.fromarray(edges)
+
+     def generate_advanced(self, room_image: Image.Image, inspiration_image: Image.Image,
+                           prompt: str, control_strength: float, num_steps: int,
+                           guidance_scale: float, seed: int) -> Image.Image:
+         """Generate using SDXL + ControlNet"""
+
+         logger.info("🎨 Starting advanced generation...")
+
+         control_image = self.create_canny_control(room_image)
+
+         # Enhanced prompt
+         enhanced_prompt = f"{prompt}, professional interior design, high quality, detailed"
+         negative_prompt = "blurry, low quality, distorted, deformed, ugly, oversaturated, cluttered"
+
+         generator = torch.Generator(device=self.device).manual_seed(seed)
+
+         # Clear cache before generation
+         if self.device == "cuda":
+             torch.cuda.empty_cache()
+
+         logger.info(f"Generation parameters: steps={num_steps}, guidance={guidance_scale}, control={control_strength}")
+
+         with torch.autocast(self.device, enabled=(self.device == "cuda")):
+             result = self.pipe(
+                 prompt=enhanced_prompt,
+                 negative_prompt=negative_prompt,
+                 image=control_image,
+                 controlnet_conditioning_scale=control_strength,
+                 num_inference_steps=num_steps,
+                 guidance_scale=guidance_scale,
+                 generator=generator,
+                 height=room_image.height,
+                 width=room_image.width
+             ).images[0]
+
+         # Cleanup
+         if self.device == "cuda":
+             torch.cuda.empty_cache()
+
+         logger.info("✅ Advanced generation completed")
+         return result
+
+     def style_transfer_fallback(self, room_image: Image.Image, inspiration_image: Image.Image) -> Image.Image:
+         """Computer vision-based style transfer"""
+         logger.info("🎨 Using computer vision style transfer...")
+
+         room_array = np.array(room_image).astype(np.float32) / 255.0
+         inspiration_array = np.array(inspiration_image).astype(np.float32) / 255.0
+
+         # Convert to LAB color space for better color transfer
+         room_lab = cv2.cvtColor((room_array * 255).astype(np.uint8), cv2.COLOR_RGB2LAB).astype(np.float32)
+         inspiration_lab = cv2.cvtColor((inspiration_array * 255).astype(np.uint8), cv2.COLOR_RGB2LAB).astype(np.float32)
+
+         # Transfer color statistics
+         for channel in range(3):
+             room_mean, room_std = cv2.meanStdDev(room_lab[:,:,channel])
+             inspiration_mean, inspiration_std = cv2.meanStdDev(inspiration_lab[:,:,channel])
+
+             if room_std > 0:
+                 room_lab[:,:,channel] = ((room_lab[:,:,channel] - room_mean) *
+                                          (inspiration_std / room_std) + inspiration_mean)
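+                 # (Reinhard-style color transfer: recenter each channel on the
+                 # inspiration image's mean and rescale to its standard deviation)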
+
+         # Clamp values to valid ranges
+         room_lab[:,:,0] = np.clip(room_lab[:,:,0], 0, 100)     # L channel
+         room_lab[:,:,1] = np.clip(room_lab[:,:,1], -127, 127)  # A channel
+         room_lab[:,:,2] = np.clip(room_lab[:,:,2], -127, 127)  # B channel
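+         # (these bounds assume the conventional float LAB encoding; note that
+         # OpenCV's 8-bit LAB, as produced above, stores L scaled to 0-255 and
+         # a/b offset by +128)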
+
+         # Convert back to RGB
+         result_array = cv2.cvtColor(room_lab.astype(np.uint8), cv2.COLOR_LAB2RGB)
+         result_image = Image.fromarray(result_array)
+
+         # Enhance the result
+         enhancer = ImageEnhance.Contrast(result_image)
+         result_image = enhancer.enhance(1.15)
+
+         enhancer = ImageEnhance.Color(result_image)
+         result_image = enhancer.enhance(1.25)
+
+         logger.info("✅ Style transfer completed")
+         return result_image
+
+     def transform_room(self, room_image: Image.Image, inspiration_image: Image.Image,
+                        custom_prompt: str = "", control_strength: float = 0.8,
+                        style_strength: float = 0.7, num_steps: int = 20,
+                        guidance_scale: float = 7.5, seed: int = 42) -> Tuple[Image.Image, Dict]:
+         """Main transformation function"""
+
+         # Initialize if needed
+         if not self.initialized:
+             self.initialize_pipeline()
+
+         # Prepare images
+         room_prepared = self.prepare_image(room_image)
+         inspiration_prepared = self.prepare_image(inspiration_image)
+
+         # Create control image for display
+         control_image = self.create_canny_control(room_prepared)
+
+         try:
+             if self.mode == "advanced" and self.pipe is not None:
+                 if not custom_prompt.strip():
+                     custom_prompt = "elegant modern interior design, stylish furniture, beautiful lighting"
+
+                 result = self.generate_advanced(
+                     room_prepared, inspiration_prepared, custom_prompt,
+                     control_strength, num_steps, guidance_scale, seed
+                 )
+                 mode_used = "🤖 SDXL + ControlNet AI"
+
+             else:
+                 result = self.style_transfer_fallback(room_prepared, inspiration_prepared)
+                 mode_used = "🎨 Computer Vision Style Transfer"
+
+             info = {
+                 'control_image': control_image,
+                 'mode': mode_used,
+                 'prompt': custom_prompt,
+                 'device': self.device,
+                 'resolution': f"{room_prepared.width}x{room_prepared.height}",
+                 'pipeline_mode': self.mode,
+                 'error_log': self.error_log.copy() if self.error_log else None
+             }
+
+             return result, info
+
+         except Exception as e:
+             self.log_error(f"Generation failed: {e}")
+             self.log_error(f"Full traceback: {traceback.format_exc()}")
+
+             # Emergency fallback
+             result = self.style_transfer_fallback(room_prepared, inspiration_prepared)
+             info = {
+                 'control_image': control_image,
+                 'mode': "🛠️ Emergency Fallback",
+                 'error': str(e),
+                 'device': self.device,
+                 'resolution': f"{room_prepared.width}x{room_prepared.height}",
+                 'error_log': self.error_log.copy()
+             }
+             return result, info
+
+ # Global pipeline instance
+ pipeline = None
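+ # (get_pipeline() below constructs the pipeline lazily on first use, so model
+ # downloads happen on the first request rather than at import time)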
+
+ def get_pipeline():
+     """Get or create pipeline"""
+     global pipeline
+     if pipeline is None:
+         pipeline = InteriorDesignPipeline()
+     return pipeline
+
+ def transform_images(room_image, inspiration_image, custom_prompt,
+                      control_strength, style_strength, num_steps,
+                      guidance_scale, seed):
+     """Main processing function for Gradio interface"""
+
+     if room_image is None:
+         return None, None, "❌ Please upload a room image"
+
+     if inspiration_image is None:
+         return None, None, "❌ Please upload an inspiration image"
+
+     try:
+         pipe = get_pipeline()
+
+         result, info = pipe.transform_room(
+             room_image, inspiration_image, custom_prompt,
+             control_strength, style_strength, num_steps,
+             guidance_scale, seed
+         )
+
+         # Create detailed status message with debug info
+         status = f"""✅ Transformation Complete!
+
+ 🔧 Mode: {info['mode']}
+ 💻 Device: {info['device']}
+ 📐 Resolution: {info['resolution']}
+ 🧠 Pipeline Status: {info.get('pipeline_mode', 'unknown')}
+ 📝 Prompt: {info['prompt'][:100]}{'...' if len(info['prompt']) > 100 else ''}
+
+ {get_mode_description(info.get('pipeline_mode', 'unknown'))}
+ """
+
+         # Add error information if available
+         if info.get('error_log'):
+             status += "\n\n🔍 DEBUG INFO:\n"
+             for i, error in enumerate(info['error_log'][-3:]):  # Show last 3 errors
+                 status += f"{i+1}. {error[:100]}{'...' if len(error) > 100 else ''}\n"
+
+         if 'error' in info:
+             status += f"\n⚠️ Last Error: {info['error'][:150]}{'...' if len(info['error']) > 150 else ''}"
+
+         return result, info['control_image'], status

      except Exception as e:
+         error_msg = f"""❌ Critical Processing Error
+
+ Error: {str(e)[:300]}{'...' if len(str(e)) > 300 else ''}
+
+ Full traceback:
+ {traceback.format_exc()[:500]}{'...' if len(traceback.format_exc()) > 500 else ''}
+
+ This information can help debug the issue."""
+
+         return None, None, error_msg
+
+ def get_mode_description(mode):
+     """Get description for current mode"""
+     descriptions = {
+         "advanced": "🚀 Using state-of-the-art AI models (SDXL + ControlNet) - full AI processing active!",
+         "fallback": "🎨 Using computer vision algorithms for style transfer - AI models failed to load",
+         "uninitialized": "⏳ Pipeline initializing...",
+         "unknown": "🔄 Processing with available methods"
+     }
+     return descriptions.get(mode, "🔄 Processing...")
+
+ def get_debug_info():
+     """Get system debug information"""
+     info = f"""🔍 System Debug Information:
+
+ 💻 Device: {DEVICE}
+ 🔢 Data Type: {DTYPE}
+ 🎮 CUDA Available: {torch.cuda.is_available()}
+ """
+
+     if torch.cuda.is_available():
+         info += f"""🎮 CUDA Device: {torch.cuda.get_device_name()}
+ 💾 CUDA Memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f}GB
+ """
+
+     info += f"""🔑 HF Token: {'Available' if HF_TOKEN else 'Not set'}
+ 📦 PyTorch Version: {torch.__version__}
+ """
+
+     try:
+         import diffusers
+         info += f"📦 Diffusers Version: {diffusers.__version__}\n"
+     except:
+         info += "📦 Diffusers: Not available\n"
+
+     try:
+         import transformers
+         info += f"📦 Transformers Version: {transformers.__version__}\n"
+     except:
+         info += "📦 Transformers: Not available\n"
+
+     return info
+
+ def create_gradio_interface():
+     """Create the Gradio interface with debug info"""
+
      with gr.Blocks(
+         title="Interior Design AI - Debug Mode",
+         theme=gr.themes.Soft(),
          css="""
+         .gradio-container {
+             max-width: 1400px !important;
+         }
+         .status-box {
+             font-family: monospace;
+             font-size: 11px;
+         }
          """
      ) as demo:

+         gr.Markdown("""
+         # 🏠 Interior Design AI Transformer (Debug Mode)
+
+         This debug version provides detailed information about what's happening during model loading and processing.
+         """)
+
+         with gr.Tabs():
+             with gr.TabItem("🚀 Transform"):
+                 with gr.Row():
+                     with gr.Column(scale=1):
+                         gr.Markdown("### 📤 Input")
+
+                         room_input = gr.Image(type="pil", label="🏠 Your Room Photo", height=300)
+                         inspiration_input = gr.Image(type="pil", label="🎨 Style Inspiration", height=300)
+
+                         prompt_input = gr.Textbox(
+                             label="📝 Custom Style Description (Optional)",
+                             placeholder="modern minimalist, Scandinavian design, warm lighting...",
+                             lines=3
                          )

+                         with gr.Row():
+                             control_strength = gr.Slider(0.5, 1.0, 0.8, label="🏗️ Structure Preservation")
+                             style_strength = gr.Slider(0.3, 1.0, 0.7, label="🎨 Style Intensity")
+
+                         with gr.Accordion("⚙️ Advanced Settings", open=False):
+                             num_steps = gr.Slider(10, 30, 20, step=5, label="🔄 Steps")
+                             guidance_scale = gr.Slider(3.0, 12.0, 7.5, label="🎯 Guidance")
+                             seed = gr.Number(42, label="🎲 Seed", precision=0)
+
+                         generate_btn = gr.Button("🚀 Transform Room", variant="primary", size="lg")
+
+                     with gr.Column(scale=1):
+                         gr.Markdown("### 📊 Results")
+                         result_output = gr.Image(type="pil", label="Result", height=400)
+                         control_output = gr.Image(type="pil", label="🔍 Edge Control", height=200)
+
+                         status_output = gr.Textbox(
+                             label="📊 Detailed Status & Debug Info",
+                             lines=15,
+                             interactive=False,
+                             value="🔧 Ready! Upload images and click Transform to see detailed processing info.",
+                             elem_classes=["status-box"]
                          )

+             with gr.TabItem("🔍 Debug Info"):
+                 gr.Markdown("### 🔍 System Information")
+                 debug_info = gr.Textbox(
+                     value=get_debug_info(),
+                     lines=15,
+                     interactive=False,
+                     elem_classes=["status-box"]
+                 )
+
+                 refresh_debug = gr.Button("🔄 Refresh Debug Info")
+                 refresh_debug.click(fn=get_debug_info, outputs=debug_info)
+
+         # Event handling
+         generate_btn.click(
+             fn=transform_images,
+             inputs=[
+                 room_input, inspiration_input, prompt_input,
+                 control_strength, style_strength, num_steps,
+                 guidance_scale, seed
+             ],
+             outputs=[result_output, control_output, status_output],
+             show_progress=True
+         )
+
+         gr.Markdown("""
+         ### 🔍 Debug Information
+         This version shows detailed logs to help identify why the AI models might not be loading.
+         Check the "Debug Info" tab and the status messages for clues.
+         """)

      return demo

  if __name__ == "__main__":
+     logger.info("🏠 Starting Interior Design AI (Debug Mode)")
+
+     # Show initial debug info
+     print("\n" + "="*50)
+     print(get_debug_info())
+     print("="*50 + "\n")
+
+     # Create and launch interface
+     demo = create_gradio_interface()
+     demo.launch(
+         server_name="0.0.0.0",
+         server_port=7860,
+         show_error=True,
+         share=False
+     )