|
|
import gradio as gr
import numpy as np
from PIL import Image
import cv2
from typing import Tuple, Dict, Any
import warnings

warnings.filterwarnings("ignore")

from config import Config
from segmentation import RoomSegmentation
from style_extractor import StyleExtractor
from utils import create_multi_comparison_image, enhance_image_quality
|
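# Pipeline flow implemented by the class below:
#   1. extract style from the inspiration image (colors, texture, layout, lighting)
#   2. build a structural preservation mask for the user's room
#   3. transfer color, texture, and lighting outside the preserved regions
#   4. blend, color-correct, and enhance the final result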
|
class AdvancedHFSpacesPipeline:
    """Advanced pipeline for Hugging Face Spaces with full capabilities"""

    def __init__(self):
        self.device = "cpu"  # HF Spaces free tier is CPU-only
        self.segmentation = None
        self.style_extractor = None
        self.models_loaded = False
        self.config = Config()

        # Conservative settings for the free-tier CPU
        self.config.IMAGE_SIZE = 768
        self.config.NUM_INFERENCE_STEPS = 30
        self.config.BATCH_SIZE = 1
|
    def load_models(self):
        """Load advanced models on-demand"""
        if self.models_loaded:
            return

        print("Loading advanced models...")

        try:
            self.segmentation = RoomSegmentation(device=self.device)
            print("✓ Advanced segmentation model loaded")
        except Exception as e:
            print(f"✗ Segmentation model failed: {e}")
            self.segmentation = None

        try:
            self.style_extractor = StyleExtractor()
            print("✓ Advanced style extractor loaded")
        except Exception as e:
            print(f"✗ Style extractor failed: {e}")
            self.style_extractor = None

        self.models_loaded = True
        print("Model loading complete")
|
    def advanced_style_transfer(self, user_room: Image.Image, inspiration_room: Image.Image) -> Tuple[Image.Image, str, Dict]:
        """Advanced style transfer using full pipeline capabilities"""
        # Ensure 3-channel RGB input (uploads may be RGBA or grayscale)
        user_np = np.array(user_room.convert("RGB"))
        inspiration_np = np.array(inspiration_room.convert("RGB"))

        # Resize both images to the working resolution
        target_size = (self.config.IMAGE_SIZE, self.config.IMAGE_SIZE)
        user_resized = cv2.resize(user_np, target_size)
        inspiration_resized = cv2.resize(inspiration_np, target_size)

        # 1. Analyze the inspiration image's style
        style_info = self._extract_advanced_style(inspiration_resized)

        # 2. Build a mask of structural regions to preserve in the user's room
        preservation_mask = self._create_advanced_mask(user_resized)

        # 3. Transfer color, texture, and lighting
        result = self._apply_advanced_style_transfer(
            user_resized, inspiration_resized, preservation_mask, style_info
        )

        # 4. Blend, color-correct, and enhance the final image
        result = self._advanced_post_processing(result, user_resized, preservation_mask, style_info)

        result_pil = Image.fromarray(result)

        return result_pil, style_info['summary'], style_info
|
    def _extract_advanced_style(self, image: np.ndarray) -> Dict[str, Any]:
        """Extract comprehensive style information"""
        if self.style_extractor:
            try:
                return self.style_extractor.extract_style(image)
            except Exception as e:
                print(f"Style extractor failed, using rule-based analysis: {e}")

        # Rule-based fallback when the ML extractor is unavailable
        return self._advanced_style_analysis(image)
|
    def _advanced_style_analysis(self, image: np.ndarray) -> Dict[str, Any]:
        """Advanced rule-based style analysis for when ML models aren't available"""
        # LAB separates lightness (L) from the color-opponent channels (a, b)
        lab = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
        l_channel, a_channel, b_channel = cv2.split(lab)

        # OpenCV's 8-bit LAB stores a and b offset by 128; re-center so that
        # 0 is neutral, otherwise the warm/cool thresholds below never trigger
        a_centered = a_channel.astype(np.float32) - 128
        b_centered = b_channel.astype(np.float32) - 128

        l_mean, l_std = np.mean(l_channel), np.std(l_channel)
        a_mean, a_std = np.mean(a_centered), np.std(a_centered)
        b_mean, b_std = np.mean(b_centered), np.std(b_centered)

        # Overall tone from mean lightness (0-255)
        if l_mean > 150:
            tone = "bright"
            mood = "energetic"
        elif l_mean < 100:
            tone = "dark"
            mood = "cozy"
        else:
            tone = "medium"
            mood = "balanced"

        # Color temperature: positive a leans red, positive b leans yellow
        if a_mean > 10 and b_mean > 10:
            temp = "warm"
            color_style = "sunny"
        elif a_mean < -10 and b_mean < -10:
            temp = "cool"
            color_style = "serene"
        else:
            temp = "neutral"
            color_style = "balanced"

        # Texture and layout analysis on the grayscale image
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        texture_features = self._analyze_texture(gray)
        layout_info = self._analyze_layout(gray)

        style_summary = f"{tone} {temp} {mood} interior with {color_style} colors"
        style_summary += f", {texture_features['texture_type']} textures"
        style_summary += f", {layout_info['layout_type']} layout"

        return {
            'summary': style_summary,
            'tone': tone,
            'temperature': temp,
            'mood': mood,
            'color_style': color_style,
            'brightness': {
                'mean': float(l_mean),
                'std': float(l_std),
                'contrast': float(l_std / l_mean) if l_mean > 0 else 0.0
            },
            'color_balance': {
                'warmth': float(a_mean),   # > 0 leans red/warm
                'coolness': float(b_mean)  # > 0 leans yellow, < 0 leans blue
            },
            'texture': texture_features,
            'layout': layout_info
        }
|
    def _analyze_texture(self, gray_image: np.ndarray) -> Dict[str, Any]:
        """Analyze texture characteristics"""
        # Edge density: fraction of pixels on a Canny edge
        edges = cv2.Canny(gray_image, 50, 150)
        edge_density = np.sum(edges > 0) / edges.size

        # Local Binary Pattern variation as a texture measure
        lbp = self._compute_lbp(gray_image)
        lbp_std = np.std(lbp)

        if edge_density > 0.1:
            texture_type = "detailed"
        elif lbp_std > 20:
            texture_type = "textured"
        else:
            texture_type = "smooth"

        return {
            'texture_type': texture_type,
            'edge_density': float(edge_density),
            'texture_variation': float(lbp_std)
        }
|
    def _compute_lbp(self, image: np.ndarray, radius: int = 3, n_points: int = 8) -> np.ndarray:
        """Compute a Local Binary Pattern (LBP) texture map"""
        height, width = image.shape
        lbp = np.zeros((height, width), dtype=np.uint8)
        center = image[radius:height - radius, radius:width - radius]

        # Compare each pixel against n_points neighbors sampled on a circle of
        # the given radius; each comparison sets one bit of the LBP code.
        # Vectorized over the whole image (a per-pixel Python loop is far too
        # slow at 768x768).
        for k in range(n_points):
            angle = 2 * np.pi * k / n_points
            dx = int(radius * np.cos(angle))
            dy = int(radius * np.sin(angle))
            neighbor = image[radius + dx:height - radius + dx,
                             radius + dy:width - radius + dy]
            lbp[radius:height - radius, radius:width - radius] |= (
                (neighbor >= center).astype(np.uint8) << k
            )

        return lbp
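
    # Illustrative example of the LBP encoding above (hypothetical values):
    # for a center pixel of 90 with circle neighbors [95, 80, 91, 70, 90, 85,
    # 100, 60], the bits (neighbor >= center) are [1, 0, 1, 0, 1, 0, 1, 0],
    # giving the code 0b01010101 = 85. Only the std of these codes is consumed
    # by _analyze_texture, as a texture-variation measure.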
|
    def _analyze_layout(self, gray_image: np.ndarray) -> Dict[str, Any]:
        """Analyze room layout and composition"""
        # Find contours of major regions
        edges = cv2.Canny(gray_image, 30, 100)
        contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        areas = [cv2.contourArea(c) for c in contours]
        if areas:
            max_area = max(areas)
            avg_area = np.mean(areas)
            area_variation = np.std(areas)
        else:
            max_area = avg_area = area_variation = 0

        # Heuristics: one dominant region suggests an open layout; high area
        # variance suggests a varied one
        if max_area > gray_image.size * 0.3:
            layout_type = "open"
        elif area_variation > avg_area * 0.5:
            layout_type = "varied"
        else:
            layout_type = "balanced"

        return {
            'layout_type': layout_type,
            'max_area': float(max_area),
            'avg_area': float(avg_area),
            'area_variation': float(area_variation)
        }
|
    def _create_advanced_mask(self, image: np.ndarray) -> np.ndarray:
        """Create an advanced preservation mask"""
        if self.segmentation:
            try:
                masks = self.segmentation.segment_room(image)
                preservation_mask = self.segmentation.create_preservation_mask(
                    masks, self.config.PRESERVE_CLASSES
                )
                return preservation_mask
            except Exception as e:
                print(f"Segmentation failed, using rule-based mask: {e}")

        # Rule-based fallback when ML segmentation is unavailable
        return self._advanced_rule_based_mask(image)
|
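    # Mask convention used throughout the pipeline: 255 (white) marks
    # structural regions to preserve from the user's room, 0 marks regions
    # open to restyling; intermediate values blend proportionally.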
    def _advanced_rule_based_mask(self, image: np.ndarray) -> np.ndarray:
        """Advanced rule-based structural mask creation"""
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

        # Combine edges detected at two sensitivities
        edges_fine = cv2.Canny(gray, 30, 100)
        edges_coarse = cv2.Canny(gray, 50, 150)
        edges = cv2.bitwise_or(edges_fine, edges_coarse)

        kernel_small = np.ones((3, 3), np.uint8)
        kernel_large = np.ones((7, 7), np.uint8)

        dilated = cv2.dilate(edges, kernel_small, iterations=2)
        closed = cv2.morphologyEx(dilated, cv2.MORPH_CLOSE, kernel_large)
        cleaned = cv2.morphologyEx(closed, cv2.MORPH_OPEN, kernel_small)

        mask = (cleaned > 0).astype(np.uint8) * 255

        # Feather the mask for smooth blending
        mask = cv2.GaussianBlur(mask, (15, 15), 5)

        return mask
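
    # Note on the morphology chain above: dilation thickens the raw edge
    # responses, closing fills gaps between nearby edges, opening removes
    # isolated specks, and the final Gaussian blur feathers the binary mask
    # into soft 0-255 blending weights.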
|
    def _apply_advanced_style_transfer(self, user_img: np.ndarray, inspiration_img: np.ndarray,
                                       mask: np.ndarray, style_info: Dict[str, Any]) -> np.ndarray:
        """Apply advanced style transfer with multiple techniques"""
        # Work in float32 [0, 1] throughout
        user_float = user_img.astype(np.float32) / 255.0
        inspiration_float = inspiration_img.astype(np.float32) / 255.0

        # 1. Color transfer (palette mapping or histogram matching)
        result = self._advanced_color_transfer(user_float, inspiration_float, mask)

        # 2. Multi-scale texture transfer
        result = self._advanced_texture_transfer(result, inspiration_float, mask)

        # 3. Lighting adjustment toward the inspiration's brightness
        result = self._adjust_lighting(result, style_info)

        # Back to 8-bit
        result = np.clip(result, 0, 1)
        result = (result * 255).astype(np.uint8)

        return result
|
    def _advanced_color_transfer(self, user_img: np.ndarray, inspiration_img: np.ndarray,
                                 mask: np.ndarray) -> np.ndarray:
        """Advanced color transfer via palette mapping, with histogram-matching fallback"""
        inspiration_reshaped = inspiration_img.reshape(-1, 3)

        try:
            from sklearn.cluster import KMeans

            # Extract the dominant 8-color palette of the inspiration image
            kmeans = KMeans(n_clusters=8, random_state=42, n_init=10)
            kmeans.fit(inspiration_reshaped)
            palette = kmeans.cluster_centers_

            # Snap every user pixel to its nearest palette color
            user_reshaped = user_img.reshape(-1, 3)
            distances = np.linalg.norm(user_reshaped[:, np.newaxis] - palette, axis=2)
            closest_colors = palette[np.argmin(distances, axis=1)]
            # Cast back to float32 so later cv2 color conversions accept it
            closest_colors = closest_colors.reshape(user_img.shape).astype(np.float32)

            # Keep the user's colors where the mask preserves structure
            mask_norm = mask.astype(np.float32) / 255.0
            mask_norm = np.stack([mask_norm] * 3, axis=-1)
            result = user_img * mask_norm + closest_colors * (1 - mask_norm)

        except Exception as e:
            # Fallback (e.g. scikit-learn unavailable): histogram matching
            print(f"Palette transfer failed, using histogram matching: {e}")
            result = self._histogram_matching(user_img, inspiration_img, mask)

        return result
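
    # Performance note: fitting KMeans (n_init=10) on every pixel of a
    # 768x768 image (~590k samples) is the slowest CPU step here; subsampling
    # the pixels before fitting would be a straightforward optimization.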
|
    def _histogram_matching(self, user_img: np.ndarray, inspiration_img: np.ndarray,
                            mask: np.ndarray) -> np.ndarray:
        """Per-channel histogram matching for color transfer"""
        result = user_img.copy()

        for i in range(3):
            # Histograms over [0, 1] float intensities, 256 bins per channel
            user_hist, _ = np.histogram(user_img[:, :, i].flatten(), 256, [0, 1])
            insp_hist, _ = np.histogram(inspiration_img[:, :, i].flatten(), 256, [0, 1])

            # Cumulative distribution functions, normalized to [0, 1]
            user_cdf = user_hist.cumsum()
            insp_cdf = insp_hist.cumsum()
            user_cdf = user_cdf / user_cdf[-1]
            insp_cdf = insp_cdf / insp_cdf[-1]

            # For each user intensity, find the inspiration intensity with the
            # same CDF value (the classic histogram-matching lookup table)
            lookup = np.interp(user_cdf, insp_cdf, np.arange(256))
            lookup = lookup / 255.0

            channel = user_img[:, :, i]
            result[:, :, i] = np.interp(channel, np.arange(256) / 255.0, lookup)

        # Blend: preserve the user's colors where the mask is strong
        mask_norm = mask.astype(np.float32) / 255.0
        mask_norm = np.stack([mask_norm] * 3, axis=-1)

        return user_img * mask_norm + result * (1 - mask_norm)
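
    # Sketch of the matching rule above: if 30% of the user's pixels lie at or
    # below intensity 0.2 (user_cdf), and the inspiration image reaches a CDF
    # of 0.3 at intensity 0.35, the lookup table maps 0.2 -> 0.35, pulling the
    # user's intensity distribution toward the inspiration's.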
|
    def _advanced_texture_transfer(self, image: np.ndarray, inspiration_img: np.ndarray,
                                   mask: np.ndarray) -> np.ndarray:
        """Multi-scale texture transfer in the luminance domain"""
        inspiration_gray = cv2.cvtColor(inspiration_img, cv2.COLOR_RGB2GRAY).astype(np.float32)
        user_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY).astype(np.float32)

        # Blend luminance at multiple scales; coarser scales contribute less
        scales = [1.0, 0.5, 0.25]
        texture_result = np.zeros_like(user_gray)

        for scale in scales:
            if scale != 1.0:
                scaled_insp = cv2.resize(inspiration_gray, None, fx=scale, fy=scale)
                scaled_user = cv2.resize(user_gray, None, fx=scale, fy=scale)
            else:
                scaled_insp = inspiration_gray
                scaled_user = user_gray

            texture_factor = 0.3 * scale
            scaled_result = scaled_user * (1 - texture_factor) + scaled_insp * texture_factor

            if scale != 1.0:
                scaled_result = cv2.resize(scaled_result, (user_gray.shape[1], user_gray.shape[0]))

            texture_result += scaled_result

        texture_result /= len(scales)

        # Apply the blended texture only outside the preservation mask, so
        # structural regions keep the user's original luminance and unmasked
        # regions aren't darkened
        style_region = 1.0 - mask.astype(np.float32) / 255.0
        for i in range(3):
            image[:, :, i] = image[:, :, i] * (1 - 0.3 * style_region) + texture_result * (0.3 * style_region)

        return image
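
    # Weighting note: texture_factor = 0.3 * scale means the full-resolution
    # pass contributes the most inspiration texture (30%) and the 0.25x pass
    # the least (7.5%), so fine detail dominates the averaged result.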
|
    def _adjust_lighting(self, image: np.ndarray, style_info: Dict[str, Any]) -> np.ndarray:
        """Adjust lighting based on style analysis"""
        brightness = style_info.get('brightness', {})
        # Style analysis measured L on 8-bit LAB (0-255); normalize to [0, 1]
        target_brightness = brightness.get('mean', 128) / 255.0

        # For float32 RGB input in [0, 1], OpenCV returns L in [0, 100]
        lab = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2LAB)
        l_channel = lab[:, :, 0] / 100.0

        # Scale luminance toward the target brightness
        current_brightness = np.mean(l_channel)
        brightness_factor = target_brightness / current_brightness if current_brightness > 0 else 1.0

        l_channel = np.clip(l_channel * brightness_factor, 0, 1)

        lab[:, :, 0] = l_channel * 100.0
        result = cv2.cvtColor(lab, cv2.COLOR_LAB2RGB)

        return result
|
    def _advanced_post_processing(self, result: np.ndarray, user_img: np.ndarray,
                                  mask: np.ndarray, style_info: Dict[str, Any]) -> np.ndarray:
        """Advanced post-processing for the final result"""
        # 1. Seamless blending of preserved and styled regions
        result = self._enhanced_blending(result, user_img, mask)

        # 2. Color correction toward the inspiration's color balance
        result = self._advanced_color_correction(result, style_info)

        # 3. Final sharpening / contrast / saturation pass
        result = enhance_image_quality(
            result,
            sharpness=0.4,
            contrast=1.1,
            saturation=1.05
        )

        return result
|
    def _enhanced_blending(self, result: np.ndarray, user_img: np.ndarray,
                           mask: np.ndarray) -> np.ndarray:
        """Enhanced blending for seamless integration"""
        # Heavily feathered mask to avoid visible seams
        mask_smooth = cv2.GaussianBlur(mask, (25, 25), 8)
        mask_norm = mask_smooth.astype(np.float32) / 255.0
        mask_norm = np.stack([mask_norm] * 3, axis=-1)

        # Pull preserved regions back toward the original image
        alpha = 0.8
        blended = result * (1 - alpha * mask_norm) + user_img * (alpha * mask_norm)

        # Return uint8 so downstream OpenCV color conversions accept the array
        return np.clip(blended, 0, 255).astype(np.uint8)
|
    def _advanced_color_correction(self, image: np.ndarray, style_info: Dict[str, Any]) -> np.ndarray:
        """Advanced color correction based on style analysis"""
        color_balance = style_info.get('color_balance', {})
        warmth = color_balance.get('warmth', 0)
        coolness = color_balance.get('coolness', 0)

        lab = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)

        # Nudge the a (red-green) and b (yellow-blue) channels toward the
        # inspiration's measured balance; the 0.1 factor keeps the shift subtle
        if abs(warmth) > 5:
            lab[:, :, 1] = np.clip(lab[:, :, 1] + warmth * 0.1, 0, 255)

        if abs(coolness) > 5:
            lab[:, :, 2] = np.clip(lab[:, :, 2] + coolness * 0.1, 0, 255)

        result = cv2.cvtColor(lab, cv2.COLOR_LAB2RGB)

        return result
|
    def process_images(self, user_room: Image.Image, inspiration_room: Image.Image) -> Tuple[Image.Image, str, Dict]:
        """Main processing function for Gradio"""
        try:
            # Load models lazily on the first request
            self.load_models()

            result, style_summary, full_style_info = self.advanced_style_transfer(user_room, inspiration_room)

            # Build a side-by-side comparison image
            user_np = np.array(user_room)
            inspiration_np = np.array(inspiration_room)
            result_np = np.array(result)

            # The comparison utility expects BGR (OpenCV convention)
            if len(user_np.shape) == 3 and user_np.shape[2] == 3:
                user_np = cv2.cvtColor(user_np, cv2.COLOR_RGB2BGR)
            if len(inspiration_np.shape) == 3 and inspiration_np.shape[2] == 3:
                inspiration_np = cv2.cvtColor(inspiration_np, cv2.COLOR_RGB2BGR)
            if len(result_np.shape) == 3 and result_np.shape[2] == 3:
                result_np = cv2.cvtColor(result_np, cv2.COLOR_RGB2BGR)

            comparison = create_multi_comparison_image(
                [user_np, inspiration_np, result_np],
                titles=["Your Room", "Inspiration", "Advanced Result"],
                title="Advanced Interior Style Transfer"
            )

            # Back to RGB for display
            comparison = Image.fromarray(cv2.cvtColor(comparison, cv2.COLOR_BGR2RGB))

            return comparison, style_summary, full_style_info

        except Exception as e:
            error_msg = f"Advanced processing failed: {e}"
            print(error_msg)

            # If the transfer itself succeeded and only the comparison step
            # failed, return the raw result; otherwise report the error
            try:
                return result, style_summary, full_style_info
            except NameError:
                return None, error_msg, {}
|
|
# Global pipeline instance; models load lazily on the first request
pipeline = AdvancedHFSpacesPipeline()
|
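# Example of driving the pipeline outside Gradio (hypothetical file paths):
#
#   user = Image.open("my_room.jpg")
#   inspiration = Image.open("inspiration.jpg")
#   comparison, summary, details = pipeline.process_images(user, inspiration)
#   comparison.save("comparison.png")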
|
def create_advanced_interface():
    with gr.Blocks(title="Advanced Interior Style Transfer", theme=gr.themes.Soft()) as demo:
        gr.Markdown("""
        # 🏠 Advanced Interior Style Transfer Pipeline

        **Professional-grade interior style transformation with AI-powered analysis!**

        **Advanced Features:**
        - 🧠 ML-powered room segmentation
        - 🎨 Comprehensive style analysis (colors, textures, layout, lighting)
        - 🏠 Advanced structural preservation
        - ✨ Multi-scale texture transfer
        - 🔧 Intelligent post-processing

        **Optimized for Hugging Face Spaces Free Tier** 🚀
        """)
|
        with gr.Row():
            with gr.Column():
                user_room_input = gr.Image(
                    label="Your Room",
                    type="pil",
                    height=300
                )
                inspiration_room_input = gr.Image(
                    label="Inspiration Room",
                    type="pil",
                    height=300
                )

                transfer_btn = gr.Button(
                    "🚀 Advanced Style Transfer",
                    variant="primary",
                    size="lg"
                )

            with gr.Column():
                output_image = gr.Image(
                    label="Advanced Result",
                    height=400
                )
                style_summary = gr.Textbox(
                    label="Style Analysis Summary",
                    lines=2,
                    interactive=False
                )

        with gr.Accordion("📊 Detailed Style Analysis", open=False):
            style_details = gr.JSON(
                label="Complete Style Information"
            )
|
        def process_advanced_style_transfer(user_img, inspiration_img):
            if user_img is None or inspiration_img is None:
                return None, "Please upload both images", {}
            return pipeline.process_images(user_img, inspiration_img)

        transfer_btn.click(
            fn=process_advanced_style_transfer,
            inputs=[user_room_input, inspiration_room_input],
            outputs=[output_image, style_summary, style_details]
        )

        gr.Markdown("""
        ---
        **Built with:** Advanced ML Models, OpenCV, scikit-learn, PIL, Custom AI Pipeline

        **Optimized for:** Hugging Face Spaces Free Tier CPU

        **Professional Features:** ML Segmentation, Advanced Style Analysis, Multi-scale Processing, Intelligent Blending
        """)

    return demo
|
|
if __name__ == "__main__":
    demo = create_advanced_interface()
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        show_error=True
    )