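    # NOTE: this method assumes the following module-level imports (not shown
    # in this excerpt): numpy as np; torch; torch.nn.functional as F; cv2;
    # PIL's Image and ImageEnhance; scipy.ndimage as ndimage; and
    # skimage.measure as measure.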
    def create_sam_mask(self, image, bbox_coords, mode):
        """
        EXTENDED function: creates a precise mask with SAM 2.
        """
        try:
            print("#" * 80)
            print("# 🎯 STARTING SAM 2 SEGMENTATION")
            print("#" * 80)
            print(f"📐 Input image size: {image.size}")
            print(f"🎛️ Selected mode: {mode}")

            # Keep references to the original inputs so the except block at the
            # end of this method can always build a fallback mask, even if bbox
            # validation below raises. (original_bbox is refined to the
            # validated coordinates right after validation.)
            original_image = image
            original_bbox = bbox_coords

            if not self.sam_initialized:
                print("📥 SAM 2 not loaded yet, starting lazy loading...")
                self._lazy_load_sam()

            if self.sam_model is None or self.sam_processor is None:
                print("⚠️ SAM 2 model not available, using fallback")
                fallback = self._create_rectangular_mask(image, bbox_coords, mode)
                # Return the fallback twice to match the (mask, raw_mask)
                # contract used by every other exit path of this method.
                return fallback, fallback

            x1, y1, x2, y2 = self._validate_bbox(image, bbox_coords)
            original_bbox = (x1, y1, x2, y2)
            print(f"📏 Original bbox size: {x2-x1} × {y2-y1} px")
            if mode == "environment_change":
                print("-" * 60)
                print("🌳 MODE: ENVIRONMENT_CHANGE")
                print("-" * 60)

                image_np = np.array(image.convert("RGB"))

                # One box prompt, nested per the processor convention: [batch][box][coords].
                input_boxes = [[[x1, y1, x2, y2]]]

                inputs = self.sam_processor(
                    image_np,
                    input_boxes=input_boxes,
                    return_tensors="pt"
                ).to(self.device)

                print(f" - 'input_boxes' shape: {inputs['input_boxes'].shape}")

                print("-" * 60)
                print("🧠 SAM 2 INFERENCE (prediction)")
                with torch.no_grad():
                    print(" Running prediction...")
                    outputs = self.sam_model(**inputs)
                print("✅ Prediction finished")
                print(f" Number of predicted masks: {outputs.pred_masks.shape[2]}")

                num_masks = outputs.pred_masks.shape[2]
                print(f" SAM returned {num_masks} candidate masks")

                # Upsample every candidate mask to image resolution and convert
                # the logits to probabilities.
                all_masks = []

                for i in range(num_masks):
                    single_mask = outputs.pred_masks[:, :, i, :, :]
                    resized_mask = F.interpolate(
                        single_mask,
                        size=(image.height, image.width),
                        mode='bilinear',
                        align_corners=False
                    ).squeeze()

                    mask_np = resized_mask.sigmoid().cpu().numpy()
                    all_masks.append(mask_np)

                bbox_center = ((x1 + x2) // 2, (y1 + y2) // 2)
                bbox_area = (x2 - x1) * (y2 - y1)
                print(f" Expected bbox center: {bbox_center}")
                print(f" Expected bbox area: {bbox_area:,} pixels")

                print("🤔 HEURISTIC: selecting the best mask")
                best_mask_idx = 0
                best_score = -1

                for i in range(num_masks):
                    mask_np_temp = all_masks[i]

                    # Skip masks whose peak confidence is too low to be useful.
                    mask_max = mask_np_temp.max()
                    if mask_max < 0.3:
                        continue

                    adaptive_threshold = max(0.3, mask_max * 0.7)
                    mask_binary = (mask_np_temp > adaptive_threshold).astype(np.uint8)

                    if np.sum(mask_binary) == 0:
                        print(f" ❌ Mask {i+1}: no pixels left after adaptive_threshold {adaptive_threshold:.3f}")
                        continue

                    mask_area_pixels = np.sum(mask_binary)

                    # How much of the user bbox does this mask cover?
                    bbox_mask = np.zeros((image.height, image.width), dtype=np.uint8)
                    bbox_mask[y1:y2, x1:x2] = 1

                    overlap = np.sum(mask_binary & bbox_mask)
                    bbox_overlap_ratio = overlap / np.sum(bbox_mask) if np.sum(bbox_mask) > 0 else 0

                    # How far is the mask centroid from the bbox center?
                    y_coords, x_coords = np.where(mask_binary > 0)
                    if len(y_coords) > 0:
                        centroid_y = np.mean(y_coords)
                        centroid_x = np.mean(x_coords)
                        centroid_distance = np.sqrt((centroid_x - bbox_center[0])**2 + (centroid_y - bbox_center[1])**2)
                        normalized_distance = centroid_distance / max(image.width, image.height)
                    else:
                        centroid_distance = None
                        normalized_distance = 1.0

                    area_ratio = mask_area_pixels / bbox_area
                    area_score = 1.0 - min(abs(area_ratio - 1.0), 1.0)

                    confidence_score = mask_max

                    score = (
                        bbox_overlap_ratio * 0.4 +
                        (1.0 - normalized_distance) * 0.25 +
                        area_score * 0.25 +
                        confidence_score * 0.1
                    )

                    print(f" 📊 STANDARD SCORES for mask {i+1}:")
                    print(f" • BBox overlap: {bbox_overlap_ratio:.3f}")
                    print(f" • Centroid distance: {centroid_distance if centroid_distance is not None else 'N/A'}")
                    print(f" • Area ratio: {area_ratio:.3f}")
                    print(f" • TOTAL SCORE: {score:.3f}")

                    if score > best_score:
                        best_score = score
                        best_mask_idx = i
                        print(f" 🏆 New best mask: no. {i+1} with score {score:.3f}")

                print(f"✅ Best mask selected: no. {best_mask_idx+1} with score {best_score:.3f}")
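                # The score above is a fixed weighted sum; the weights are
                # heuristic choices, not derived quantities:
                #   score = 0.4 * bbox_overlap + 0.25 * (1 - normalized_distance)
                #         + 0.25 * area_score + 0.1 * confidence
                # Overlap with the user bbox dominates; centroid proximity and a
                # plausible area share second place; raw SAM confidence is a
                # tiebreaker.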

                mask_np = all_masks[best_mask_idx]

                max_val = mask_np.max()
                print(f" 🔍 Maximum SAM confidence of the best mask: {max_val:.3f}")

                if max_val < 0.6:
                    dynamic_threshold = 0.3
                    print(f" ⚠️ SAM is uncertain (max_val={max_val:.3f} < 0.6)")
                else:
                    dynamic_threshold = max_val * 0.8
                    print(f" ✅ SAM is confident (max_val={max_val:.3f} >= 0.6)")

                mask_array = (mask_np > dynamic_threshold).astype(np.uint8) * 255

                if mask_array.max() == 0:
                    print(" ⚠️ Mask empty, creating rectangular fallback mask")
                    # Fallback fix: paint the bbox white at full image resolution
                    # so the inversion below yields "person black, background
                    # white". (The previous version allocated a 512×512 all-zero
                    # canvas and drew the rectangle with color 0, which left the
                    # mask empty and at the wrong resolution for this branch.)
                    mask_array = np.zeros((image.height, image.width), dtype=np.uint8)
                    cv2.rectangle(mask_array, (x1, y1), (x2, y2), 255, -1)

                raw_mask_array = mask_array.copy()

                print("🌳 ENVIRONMENT-CHANGE POSTPROCESSING")

                if image.size != original_image.size:
                    print(f" ⚠️ Image size adjusted: {image.size} → {original_image.size}")
                    temp_mask = Image.fromarray(mask_array).convert("L")
                    temp_mask = temp_mask.resize(original_image.size, Image.Resampling.NEAREST)
                    mask_array = np.array(temp_mask)
                    print(f" ✅ Mask rescaled to original size: {mask_array.shape}")

                # Invert: SAM segments the person, but this mode edits the background.
                mask_array = 255 - mask_array
                print(" ✅ Mask inverted (person black, background white)")

                print("🧹 Removing white speckles inside the person...")
                kernel_open = np.ones((3, 3), np.uint8)
                mask_array = cv2.morphologyEx(mask_array, cv2.MORPH_OPEN, kernel_open, iterations=3)
                print(" ✅ MORPH_OPEN removed white speckles inside the person")

                print(f" After MORPH_OPEN - white pixels: {np.sum(mask_array > 127)}")

                print("🔧 Improving the environment mask...")
                kernel_close = np.ones((5, 5), np.uint8)
                mask_array = cv2.morphologyEx(mask_array, cv2.MORPH_CLOSE, kernel_close)
                print(" ✅ MORPH_CLOSE for a connected environment region")

                print(f" After MORPH_CLOSE - white pixels: {np.sum(mask_array > 127)}")

                print("🌈 Creating soft transitions...")
                mask_array = cv2.GaussianBlur(mask_array, (9, 9), 2.0)
                print(" ✅ Gaussian blur for soft transitions")

                print(f" After Gaussian blur - min/max: {mask_array.min()}/{mask_array.max()}")
                print(f" After Gaussian blur - dtype: {mask_array.dtype}")

                print("🎛️ Applying gamma correction...")
                mask_array = mask_array.astype(np.float32) / 255.0
                print(f" Converted to float32: min={mask_array.min():.3f}, max={mask_array.max():.3f}")

                mask_array = np.clip(mask_array, 0.0, 1.0)
                mask_array = mask_array ** 0.85
                print(f" After gamma 0.85: min={mask_array.min():.3f}, max={mask_array.max():.3f}")

                mask_array = (mask_array * 255).astype(np.uint8)
                print(" ✅ Gamma correction (0.85) against milky edges")
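                # Worked example of the gamma step: a mid-gray blur value of 0.5
                # maps to 0.5 ** 0.85 ≈ 0.555 and 0.8 maps to ≈ 0.827, so soft
                # edge pixels are pushed toward white. This slightly widens the
                # effective edit region and counteracts "milky" half-transparent
                # rims.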

                print("-" * 60)
                print("📊 FINAL MASK STATISTICS (ENVIRONMENT_CHANGE)")

                white_pixels = np.sum(mask_array > 127)
                black_pixels = np.sum(mask_array <= 127)
                total_pixels = mask_array.size

                white_ratio = white_pixels / total_pixels * 100
                black_ratio = black_pixels / total_pixels * 100

                print(f" White pixels (BACKGROUND - to change): {white_pixels:,} ({white_ratio:.1f}%)")
                print(f" Black pixels (PERSON - to preserve): {black_pixels:,} ({black_ratio:.1f}%)")
                print(f" Total pixels: {total_pixels:,}")

                if white_ratio < 30:
                    print(f" ⚠️ WARNING: very little background ({white_ratio:.1f}%)")
                    print(" ℹ️ The person may have been segmented too large")
                elif white_ratio > 90:
                    print(f" ⚠️ WARNING: a lot of background ({white_ratio:.1f}%)")
                    print(" ℹ️ The person may have been segmented too small")
                elif 50 <= white_ratio <= 80:
                    print(f" ✅ OPTIMAL ratio ({white_ratio:.1f}%)")
                else:
                    print(f" ℹ️ Normal ratio ({white_ratio:.1f}%)")

                mask = Image.fromarray(mask_array).convert("L")
                raw_mask = Image.fromarray(raw_mask_array).convert("L")

                print("#" * 80)
                print("✅ SAM 2 SEGMENTATION FINISHED")
                print(f"📐 Final mask size: {mask.size}")
                print(f"🎛️ Mode used: {mode}")
                print("#" * 80)

                return mask, raw_mask

            elif mode == "focus_change":
                print("-" * 60)
                print("🎯 MODE: FOCUS_CHANGE (OPTIMIZED)")
                print("-" * 60)

                image_np = np.array(image.convert("RGB"))

                input_boxes = [[[x1, y1, x2, y2]]]

                # Add a single positive point prompt at the bbox center to bias
                # SAM toward the intended subject.
                center_x = (x1 + x2) // 2
                center_y = (y1 + y2) // 2
                input_points = [[[[center_x, center_y]]]]
                input_labels = [[[1]]]

                print(f" 🎯 SAM prompt: bbox [{x1},{y1},{x2},{y2}]")
                print(f" 👁️ Point: center only ({center_x},{center_y})")

                inputs = self.sam_processor(
                    image_np,
                    input_boxes=input_boxes,
                    input_points=input_points,
                    input_labels=input_labels,
                    return_tensors="pt"
                ).to(self.device)
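                # Prompt nesting as expected by the Hugging Face SamProcessor
                # (an assumption based on how this file uses it throughout):
                #   input_boxes:  [batch][num_boxes][4 coords]        -> [[[x1, y1, x2, y2]]]
                #   input_points: [batch][point_batch][num_points][2] -> [[[[cx, cy]]]]
                #   input_labels: [batch][point_batch][num_points]    -> [[[1]]] (1 = foreground)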

                print("🧠 SAM 2 INFERENCE (3 mask variants)")
                with torch.no_grad():
                    print(" Running prediction...")
                    outputs = self.sam_model(**inputs)
                print("✅ Prediction finished")
                print(f" Number of predicted masks: {outputs.pred_masks.shape[2]}")

                num_masks = outputs.pred_masks.shape[2]

                all_masks = []

                for i in range(num_masks):
                    single_mask = outputs.pred_masks[:, :, i, :, :]
                    resized_mask = F.interpolate(
                        single_mask,
                        size=(image.height, image.width),
                        mode='bilinear',
                        align_corners=False
                    ).squeeze()

                    mask_np = resized_mask.sigmoid().cpu().numpy()
                    all_masks.append(mask_np)

                bbox_center = ((x1 + x2) // 2, (y1 + y2) // 2)
                bbox_area = (x2 - x1) * (y2 - y1)

                print("🤔 HEURISTIC: selecting the best mask")
                best_mask_idx = 0
                best_score = -1

                for i in range(num_masks):
                    mask_np_temp = all_masks[i]

                    mask_max = mask_np_temp.max()
                    if mask_max < 0.3:
                        continue

                    adaptive_threshold = max(0.3, mask_max * 0.7)
                    mask_binary = (mask_np_temp > adaptive_threshold).astype(np.uint8)

                    if np.sum(mask_binary) == 0:
                        continue

                    mask_area_pixels = np.sum(mask_binary)

                    bbox_mask = np.zeros((image.height, image.width), dtype=np.uint8)
                    bbox_mask[y1:y2, x1:x2] = 1
                    overlap = np.sum(mask_binary & bbox_mask)
                    bbox_overlap_ratio = overlap / np.sum(bbox_mask) if np.sum(bbox_mask) > 0 else 0

                    y_coords, x_coords = np.where(mask_binary > 0)
                    if len(y_coords) > 0:
                        centroid_y = np.mean(y_coords)
                        centroid_x = np.mean(x_coords)
                        centroid_distance = np.sqrt((centroid_x - bbox_center[0])**2 +
                                                    (centroid_y - bbox_center[1])**2)
                        normalized_distance = centroid_distance / max(image.width, image.height)
                    else:
                        normalized_distance = 1.0

                    area_ratio = mask_area_pixels / bbox_area
                    area_score = 1.0 - min(abs(area_ratio - 1.0), 1.0)

                    # Same weighted sum as in environment_change.
                    score = (
                        bbox_overlap_ratio * 0.4 +
                        (1.0 - normalized_distance) * 0.25 +
                        area_score * 0.25 +
                        mask_max * 0.1
                    )

                    print(f" Mask {i+1}: score={score:.3f}, "
                          f"overlap={bbox_overlap_ratio:.3f}, "
                          f"area={mask_area_pixels:,}px")

                    if score > best_score:
                        best_score = score
                        best_mask_idx = i

                print(f"✅ Best mask: no. {best_mask_idx+1} with score {best_score:.3f}")

                # Resize the winning mask directly to 512×512 for ControlNet.
                best_mask_256 = outputs.pred_masks[:, :, best_mask_idx, :, :]
                resized_mask = F.interpolate(
                    best_mask_256,
                    size=(512, 512),
                    mode='bilinear',
                    align_corners=False
                ).squeeze()

                # NOTE: unlike every other branch, no sigmoid is applied here,
                # so mask_np holds raw logits. The unusually low thresholds
                # below (0.05 / 0.15 / max*0.3) appear tuned to these raw
                # values; left as-is to preserve behavior, but flagged as an
                # inconsistency.
                mask_np = resized_mask.cpu().numpy()
                print(" 🔄 Best mask rescaled to 512×512 for ControlNet")

                mask_max = mask_np.max()
                if best_score < 0.7:
                    dynamic_threshold = 0.05
                    print(f" ⚠️ Mask score is low ({best_score:.3f}). "
                          f"Threshold=0.05 for maximum coverage")
                else:
                    dynamic_threshold = max(0.15, mask_max * 0.3)
                    print(f" ✅ Good mask. Threshold={dynamic_threshold:.3f}")

                mask_array = (mask_np > dynamic_threshold).astype(np.uint8) * 255

                if mask_array.max() == 0:
                    print(" ⚠️ Mask empty, creating rectangular fallback mask")
                    mask_array = np.zeros((512, 512), dtype=np.uint8)

                    # Map the bbox into 512×512 mask space before drawing.
                    scale_x = 512 / image.width
                    scale_y = 512 / image.height
                    fb_x1 = int(x1 * scale_x)
                    fb_y1 = int(y1 * scale_y)
                    fb_x2 = int(x2 * scale_x)
                    fb_y2 = int(y2 * scale_y)
                    cv2.rectangle(mask_array, (fb_x1, fb_y1), (fb_x2, fb_y2), 255, -1)

                raw_mask_array = mask_array.copy()

                print("🔧 FOCUS_CHANGE POSTPROCESSING (at 512×512)")
                print(f" mask_array - min/max: {mask_array.min()}/{mask_array.max()}")
                print(f" mask_array - white pixels: {np.sum(mask_array > 0)}")
                print(f" mask_array - shape: {mask_array.shape}")
                print(f" mask_array - dtype: {mask_array.dtype}")

                # Keep only the largest connected component.
                labeled_array, num_features = ndimage.label(mask_array)
                if num_features > 1:
                    sizes = ndimage.sum(mask_array, labeled_array, range(1, num_features + 1))
                    largest_component = np.argmax(sizes) + 1
                    mask_array = np.where(labeled_array == largest_component, mask_array, 0)
                    print(f" ✅ Kept largest component ({num_features}→1)")

                # Close holes, then dilate to add a safety margin around the subject.
                kernel_close = np.ones((5, 5), np.uint8)
                mask_array = cv2.morphologyEx(mask_array, cv2.MORPH_CLOSE, kernel_close, iterations=2)

                kernel_dilate = np.ones((15, 15), np.uint8)
                mask_array = cv2.dilate(mask_array, kernel_dilate, iterations=1)

                mask_array = cv2.GaussianBlur(mask_array, (9, 9), 2.0)

                # Gamma correction, as in environment_change.
                mask_array_float = mask_array.astype(np.float32) / 255.0
                mask_array_float = np.clip(mask_array_float, 0.0, 1.0)
                mask_array_float = mask_array_float ** 0.85
                mask_array = (mask_array_float * 255).astype(np.uint8)

                mask_512 = Image.fromarray(mask_array).convert("L")
                raw_mask = Image.fromarray(raw_mask_array).convert("L")

                mask = mask_512

                print(f"✅ FOCUS_CHANGE mask created: {mask.size}")
                return mask, raw_mask
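            # Polarity note: environment_change returns an *inverted* mask
            # (background white = editable, person black = preserved), while
            # focus_change and face_only_change keep the segmented subject
            # white. Downstream inpainting code must respect this convention.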

            elif mode == "face_only_change":
                print("-" * 60)
                print("👤 SPECIAL MODE: FACE ONLY - ROBUST WORKFLOW")
                print("-" * 60)

                # Re-anchor the full-resolution references for this workflow.
                original_image = image
                print(f"💾 Original image saved: {original_image.size}")
                original_bbox = (x1, y1, x2, y2)
                print(f"💾 Original bbox saved: {original_bbox}")

                print("✂️ STEP 2: CREATE SQUARE CROP (bbox × 2.5)")

                bbox_center_x = (x1 + x2) // 2
                bbox_center_y = (y1 + y2) // 2
                print(f" 📍 BBox center: ({bbox_center_x}, {bbox_center_y})")

                bbox_width = x2 - x1
                bbox_height = y2 - y1
                bbox_max_dim = max(bbox_width, bbox_height)
                print(f" 📏 BBox dimensions: {bbox_width} × {bbox_height} px")
                print(f" 📐 Maximum bbox dimension: {bbox_max_dim} px")

                crop_size = int(bbox_max_dim * 2.5)
                print(f" 🎯 Target crop size: {crop_size} × {crop_size} px (bbox × 2.5)")

                # Center the square crop on the bbox...
                crop_x1 = bbox_center_x - crop_size // 2
                crop_y1 = bbox_center_y - crop_size // 2
                crop_x2 = crop_x1 + crop_size
                crop_y2 = crop_y1 + crop_size

                # ...then clamp it to the image borders.
                crop_x1 = max(0, crop_x1)
                crop_y1 = max(0, crop_y1)
                crop_x2 = min(original_image.width, crop_x2)
                crop_y2 = min(original_image.height, crop_y2)
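                # Worked example (assumed numbers): a face bbox of 100 × 120 px
                # gives bbox_max_dim = 120, so the target crop is
                # int(120 * 2.5) = 300 px square, centered on the bbox and then
                # clamped to the image borders above.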

                # If clamping shrank the crop, try to win the lost pixels back
                # on the opposite side (a few passes are enough in practice).
                max_iterations = 3
                print(f" 🔄 Iterative crop adjustment (max. {max_iterations} attempts)")

                for iteration in range(max_iterations):
                    actual_crop_width = crop_x2 - crop_x1
                    actual_crop_height = crop_y2 - crop_y1

                    if actual_crop_width >= crop_size and actual_crop_height >= crop_size:
                        print(f" ✅ Crop size OK after {iteration} iteration(s): {actual_crop_width}×{actual_crop_height} px")
                        break

                    print(f" 🔄 Iteration {iteration+1}: crop too small ({actual_crop_width}×{actual_crop_height})")

                    if actual_crop_width < crop_size:
                        if crop_x1 == 0:
                            crop_x2 = min(original_image.width, crop_x1 + crop_size)
                            print(f" ← Width adjusted (left edge): crop_x2 = {crop_x2}")
                        elif crop_x2 == original_image.width:
                            crop_x1 = max(0, crop_x2 - crop_size)
                            print(f" → Width adjusted (right edge): crop_x1 = {crop_x1}")
                        else:
                            missing_width = crop_size - actual_crop_width
                            expand_left = missing_width // 2
                            expand_right = missing_width - expand_left

                            crop_x1 = max(0, crop_x1 - expand_left)
                            crop_x2 = min(original_image.width, crop_x2 + expand_right)
                            print(f" ↔ Expanded symmetrically by {missing_width}px")

                    if actual_crop_height < crop_size:
                        if crop_y1 == 0:
                            crop_y2 = min(original_image.height, crop_y1 + crop_size)
                            print(f" ↑ Height adjusted (top edge): crop_y2 = {crop_y2}")
                        elif crop_y2 == original_image.height:
                            crop_y1 = max(0, crop_y2 - crop_size)
                            print(f" ↓ Height adjusted (bottom edge): crop_y1 = {crop_y1}")
                        else:
                            missing_height = crop_size - actual_crop_height
                            expand_top = missing_height // 2
                            expand_bottom = missing_height - expand_top

                            crop_y1 = max(0, crop_y1 - expand_top)
                            crop_y2 = min(original_image.height, crop_y2 + expand_bottom)
                            print(f" ↕ Expanded symmetrically by {missing_height}px")

                    # Re-clamp after every adjustment.
                    crop_x1 = max(0, crop_x1)
                    crop_y1 = max(0, crop_y1)
                    crop_x2 = min(original_image.width, crop_x2)
                    crop_y2 = min(original_image.height, crop_y2)

                    if iteration == max_iterations - 1:
                        actual_crop_width = crop_x2 - crop_x1
                        actual_crop_height = crop_y2 - crop_y1
                        print(f" ⚠️ Max. iterations reached. Final crop: {actual_crop_width}×{actual_crop_height} px")

                if actual_crop_width < crop_size or actual_crop_height < crop_size:
                    min_acceptable = int(bbox_max_dim * 1.8)
                    if actual_crop_width < min_acceptable or actual_crop_height < min_acceptable:
                        print(f" 🚨 CRITICAL: crop still too small ({actual_crop_width}×{actual_crop_height})")
                        print(" 🚨 SAM may run into problems!")

                print(f" 🔲 Final crop region: [{crop_x1}, {crop_y1}, {crop_x2}, {crop_y2}]")
                print(f" 📏 Final crop size: {crop_x2-crop_x1} × {crop_y2-crop_y1} px")

                cropped_image = original_image.crop((crop_x1, crop_y1, crop_x2, crop_y2))
                print(f" ✅ Square crop created: {cropped_image.size}")

                print("📐 STEP 3: TRANSFORM BBOX COORDINATES")
                rel_x1 = x1 - crop_x1
                rel_y1 = y1 - crop_y1
                rel_x2 = x2 - crop_x1
                rel_y2 = y2 - crop_y1

                rel_x1 = max(0, rel_x1)
                rel_y1 = max(0, rel_y1)
                rel_x2 = min(cropped_image.width, rel_x2)
                rel_y2 = min(cropped_image.height, rel_y2)

                print(f" 🎯 Relative bbox in crop: [{rel_x1}, {rel_y1}, {rel_x2}, {rel_y2}]")
                print(f" 📏 Relative bbox size: {rel_x2-rel_x1} × {rel_y2-rel_y1} px")

                print("🔍 STEP 4: EXTENDED IMAGE PREPARATION FOR FACE DETECTION")

                # PIL enhancement factors: 1.0 keeps the image unchanged,
                # 1.8 ≈ +80% contrast, 2.0 ≈ +100% sharpness, 1.1 ≈ +10% brightness.
                contrast_enhancer = ImageEnhance.Contrast(cropped_image)
                enhanced_image = contrast_enhancer.enhance(1.8)

                sharpness_enhancer = ImageEnhance.Sharpness(enhanced_image)
                enhanced_image = sharpness_enhancer.enhance(2.0)

                brightness_enhancer = ImageEnhance.Brightness(enhanced_image)
                enhanced_image = brightness_enhancer.enhance(1.1)

                print(" ✅ Extended image preparation finished")
                print(" • Contrast: +80%")
                print(" • Sharpness: +100%")
                print(" • Brightness: +10%")

                # Switch the working references to the enhanced crop.
                image = enhanced_image
                x1, y1, x2, y2 = rel_x1, rel_y1, rel_x2, rel_y2

                print(" 🔄 SAM will run on the prepared crop")
                print(f" 📊 SAM input size: {image.size}")
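                # Two coordinate frames are in play from here on: `image` and
                # (x1, y1, x2, y2) now refer to the enhanced square crop, while
                # `original_image` and `original_bbox` stay in the
                # full-resolution frame. Step 5 below builds masks in both
                # frames (all_masks_crop vs. all_masks_original). Caveat: the
                # original-frame variant stretches the low-res crop mask over
                # the whole image instead of pasting it at the crop offset, so
                # the step-6 overlap scores are approximate.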

                print("-" * 60)
                print("📦 BOUNDING BOX DETAILS FOR SAM:")
                print(f" Image size for SAM: {image.size}")
                print(f" BBox coordinates: [{x1}, {y1}, {x2}, {y2}]")
                print(f" BBox dimensions: {x2-x1}px × {y2-y1}px")

                print("-" * 60)
                print("🖼️ IMAGE PREPARATION FOR SAM 2")

                image_np = np.array(image.convert("RGB"))

                input_boxes = [[[x1, y1, x2, y2]]]

                center_x = (x1 + x2) // 2
                center_y = (y1 + y2) // 2

                # Second point: shifted toward the face, assuming the bbox
                # covers head and shoulders.
                bbox_height = y2 - y1
                face_offset = int(bbox_height * 0.3)
                face_x = center_x
                face_y = center_y - face_offset
                face_y = max(y1 + 10, min(face_y, y2 - 10))

                input_points = [[[[center_x, center_y], [face_x, face_y]]]]
                input_labels = [[[1, 1]]]

                print(f" 🎯 SAM prompt: bbox [{x1},{y1},{x2},{y2}]")
                print(f" 👁️ Points: center ({center_x},{center_y}), face ({face_x},{face_y})")
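                # Worked example (assumed numbers): for a bbox spanning
                # y = 200..400 (height 200 px, center_y = 300),
                # face_offset = int(200 * 0.3) = 60, so face_y = 300 - 60 = 240,
                # then clamped to stay at least 10 px inside the bbox. The 30%
                # upward shift assumes the face sits above the geometric center.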

                inputs = self.sam_processor(
                    image_np,
                    input_boxes=input_boxes,
                    input_points=input_points,
                    input_labels=input_labels,
                    return_tensors="pt"
                ).to(self.device)

                print(f"✅ Processor output: dictionary with {len(inputs)} keys: {list(inputs.keys())}")
                print(f" - 'pixel_values' shape: {inputs['pixel_values'].shape}")
                print(f" - 'input_boxes' shape: {inputs['input_boxes'].shape}")
                if 'input_points' in inputs:
                    print(f" - 'input_points' shape: {inputs['input_points'].shape}")

                print("-" * 60)
                print("🧠 SAM 2 INFERENCE (prediction)")
                with torch.no_grad():
                    print(" Running prediction...")
                    outputs = self.sam_model(**inputs)
                print("✅ Prediction finished")
                print(f" Number of predicted masks: {outputs.pred_masks.shape[2]}")

                print("📏 STEP 5: EXTRACT MASKS")

                num_masks = outputs.pred_masks.shape[2]
                print(f" SAM returned {num_masks} candidate masks")

                # Upsample every candidate into both coordinate frames: the
                # original image (for scoring against the original bbox) and
                # the crop (for building the final mask).
                all_masks_crop = []
                all_masks_original = []

                for i in range(num_masks):
                    single_mask = outputs.pred_masks[:, :, i, :, :]

                    resized_mask_original = F.interpolate(
                        single_mask,
                        size=(original_image.height, original_image.width),
                        mode='bilinear',
                        align_corners=False
                    ).squeeze()

                    mask_np_original = resized_mask_original.sigmoid().cpu().numpy()
                    all_masks_original.append(mask_np_original)

                    resized_mask_crop = F.interpolate(
                        single_mask,
                        size=(image.height, image.width),
                        mode='bilinear',
                        align_corners=False
                    ).squeeze()
                    mask_np_crop = resized_mask_crop.sigmoid().cpu().numpy()
                    all_masks_crop.append(mask_np_crop)

                    mask_binary_crop = (mask_np_crop > 0.5).astype(np.uint8)
                    mask_binary_original = (mask_np_original > 0.5).astype(np.uint8)
                    print(f" Mask {i+1}: crop={np.sum(mask_binary_crop):,}px, "
                          f"original={np.sum(mask_binary_original):,}px")

                print("🤔 STEP 6: MASK SELECTION WITH MODE-SPECIFIC HEURISTIC")

                bbox_center = ((original_bbox[0] + original_bbox[2]) // 2,
                               (original_bbox[1] + original_bbox[3]) // 2)
                bbox_area = (original_bbox[2] - original_bbox[0]) * (original_bbox[3] - original_bbox[1])

                best_mask_idx = 0
                best_score = -1

                for i, mask_np in enumerate(all_masks_original):
                    mask_max = mask_np.max()

                    if mask_max < 0.3:
                        print(f" ❌ Mask {i+1}: confidence too low ({mask_max:.3f}), skipping")
                        continue

                    adaptive_threshold = max(0.3, mask_max * 0.7)
                    mask_binary = (mask_np > adaptive_threshold).astype(np.uint8)

                    if np.sum(mask_binary) == 0:
                        print(f" ❌ Mask {i+1}: no pixels after threshold {adaptive_threshold:.3f}")
                        continue

                    mask_area_pixels = np.sum(mask_binary)

                    print(f" 🔍 Analyzing mask {i+1} with FACE HEURISTIC")

                    area_ratio = mask_area_pixels / bbox_area
                    print(f" 📐 Area ratio: {area_ratio:.3f} ({mask_area_pixels:,} / {bbox_area:,} pixels)")

                    if area_ratio < 0.6:
                        print(" ⚠️ Area too small for a head (<60% of the bbox)")
                        area_score = area_ratio * 0.5
                    elif area_ratio > 1.5:
                        print(" ⚠️ Area too large for a head (>150% of the bbox)")
                        area_score = 2.0 - area_ratio
                    elif 0.8 <= area_ratio <= 1.2:
                        area_score = 1.0
                        print(" ✅ Perfect head size (80-120% of the bbox)")
                    else:
                        area_score = 1.0 - abs(area_ratio - 1.0) * 0.5

                    # Shape analysis on the largest connected region.
                    labeled_mask = measure.label(mask_binary)
                    regions = measure.regionprops(labeled_mask)

                    if len(regions) == 0:
                        compactness_score = 0.1
                        print(" ❌ No connected regions found")
                    else:
                        largest_region = max(regions, key=lambda r: r.area)

                        solidity = largest_region.solidity if hasattr(largest_region, 'solidity') else 0.7
                        eccentricity = largest_region.eccentricity if hasattr(largest_region, 'eccentricity') else 0.5

                        if 0.4 <= eccentricity <= 0.9:
                            eccentricity_score = 1.0 - abs(eccentricity - 0.65) * 2
                        else:
                            eccentricity_score = 0.2

                        compactness_score = (solidity * 0.6 + eccentricity_score * 0.4)
                        print(" 🎯 Compactness analysis:")
                        print(f" • Solidity (area/convex hull): {solidity:.3f}")
                        print(f" • Eccentricity (shape): {eccentricity:.3f}")
                        print(f" • Compactness score: {compactness_score:.3f}")

                    bbox_mask = np.zeros((original_image.height, original_image.width), dtype=np.uint8)
                    bbox_mask[original_bbox[1]:original_bbox[3], original_bbox[0]:original_bbox[2]] = 1

                    overlap = np.sum(mask_binary & bbox_mask)
                    # Note: unlike the other modes, this ratio is relative to
                    # the mask area, i.e. "how much of the mask lies inside the bbox".
                    bbox_overlap_ratio = overlap / mask_area_pixels if mask_area_pixels > 0 else 0

                    print(f" 📍 BBox overlap: {overlap:,} of {mask_area_pixels:,} pixels ({bbox_overlap_ratio:.1%})")

                    if bbox_overlap_ratio >= 0.7:
                        bbox_score = 1.0
                        print(f" ✅ High bbox overlap: {bbox_overlap_ratio:.3f} ({overlap:,} pixels)")
                    elif bbox_overlap_ratio >= 0.5:
                        bbox_score = bbox_overlap_ratio * 1.2
                        print(f" ⚠️ Medium bbox overlap: {bbox_overlap_ratio:.3f}")
                    else:
                        bbox_score = bbox_overlap_ratio * 0.8
                        print(f" ❌ Low bbox overlap: {bbox_overlap_ratio:.3f}")

                    confidence_score = mask_max

                    score = (
                        area_score * 0.4 +
                        compactness_score * 0.3 +
                        bbox_score * 0.2 +
                        confidence_score * 0.1
                    )

                    print(f" 📊 FACE SCORES for mask {i+1}:")
                    print(f" • Area score: {area_score:.3f}")
                    print(f" • Compactness score: {compactness_score:.3f}")
                    print(f" • BBox overlap score: {bbox_score:.3f}")
                    print(f" • Confidence score: {confidence_score:.3f}")
                    print(f" • TOTAL SCORE: {score:.3f}")

                    if score > best_score:
                        best_score = score
                        best_mask_idx = i
                        print(f" 🏆 New best mask: no. {i+1} with score {score:.3f}")

                print(f"✅ Best mask selected: no. {best_mask_idx+1} with score {best_score:.3f}")
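                # Shape heuristic recap: solidity (region area / convex hull
                # area) rewards hole-free, convex blobs, and eccentricity is
                # scored against an ideal of 0.65 (1.0 - |ecc - 0.65| * 2),
                # i.e. a slightly elongated ellipse; values outside 0.4-0.9 get
                # a flat 0.2. These bounds are heuristic choices, not
                # anatomical constants.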

                mask_np = all_masks_crop[best_mask_idx]
                max_val = mask_np.max()
                print(f"🔍 Maximum SAM confidence of the best mask: {max_val:.3f}")

                if max_val < 0.5:
                    dynamic_threshold = 0.25
                    print(f" ⚠️ SAM is uncertain about the face (max_val={max_val:.3f} < 0.5)")
                elif max_val < 0.8:
                    dynamic_threshold = max_val * 0.65
                    print(f" ℹ️ SAM is moderately confident about the face (max_val={max_val:.3f})")
                else:
                    dynamic_threshold = max_val * 0.75
                    print(f" ✅ SAM is confident about the face (max_val={max_val:.3f} >= 0.8)")

                print(f" 🎯 Face threshold: {dynamic_threshold:.3f}")

                print("🐛 DEBUG THRESHOLD:")
                print(f" mask_np min/max: {mask_np.min():.3f}/{mask_np.max():.3f}")
                print(f" dynamic_threshold: {dynamic_threshold:.3f}")

                mask_array = (mask_np > dynamic_threshold).astype(np.uint8) * 255

                print("🚨 DEBUG BINARY MASK:")
                print(f" mask_array min/max: {mask_array.min()}/{mask_array.max()}")
                print(f" White pixels in mask_array: {np.sum(mask_array > 0)}")
                print(f" Share of white pixels: {np.sum(mask_array > 0) / mask_array.size:.1%}")

                if mask_array.max() == 0:
                    print("⚠️ CRITICAL: binary mask is empty! Forcing a test mask (bbox).")
                    print(f" 🚨 BBox for fallback: x1={x1}, y1={y1}, x2={x2}, y2={y2}")

                    # x1..y2 are crop-relative here, matching the crop-sized canvas.
                    test_mask = np.zeros((image.height, image.width), dtype=np.uint8)
                    cv2.rectangle(test_mask, (x1, y1), (x2, y2), 255, -1)

                    mask_array = test_mask
                    print(f"🐛 DEBUG FORCED MASK: white pixels: {np.sum(mask_array > 0)}")

                raw_mask_array = mask_array.copy()

                print("👤 FACE-SPECIFIC POSTPROCESSING")

                labeled_array, num_features = ndimage.label(mask_array)

                if num_features > 0:
                    print(f" 🔍 Components found: {num_features}")

                    sizes = ndimage.sum(mask_array, labeled_array, range(1, num_features + 1))
                    largest_component_idx = np.argmax(sizes) + 1

                    print(f" 👑 Largest component: no. {largest_component_idx} with {sizes[largest_component_idx-1]:,} pixels")

                    mask_array = np.where(labeled_array == largest_component_idx, mask_array, 0)

                print(" ⚙️ Morphological operations for a clean head")

                kernel_close = np.ones((7, 7), np.uint8)
                mask_array = cv2.morphologyEx(mask_array, cv2.MORPH_CLOSE, kernel_close, iterations=1)
                print(" • MORPH_CLOSE (7x7) - fill holes in the head")

                kernel_open = np.ones((5, 5), np.uint8)
                mask_array = cv2.morphologyEx(mask_array, cv2.MORPH_OPEN, kernel_open, iterations=1)
                print(" • MORPH_OPEN (5x5) - remove noise")

                print("🔄 ALWAYS TRANSFORM THE MASK BACK TO 512x512")

                temp_mask = Image.fromarray(mask_array).convert("L")
                print(f" Mask size on the crop: {temp_mask.size}")

                mask_512 = temp_mask.resize((512, 512), Image.Resampling.LANCZOS)
                print(" Mask rescaled to 512x512")

                raw_mask_512 = Image.fromarray(raw_mask_array).convert("L").resize(
                    (512, 512), Image.Resampling.NEAREST
                )

                # Restore the image reference for code after this branch.
                image = original_image
                print(f" 🔄 Image reference reset to the original: {image.size}")

                print("📊 FINAL MASK STATISTICS")

                white_pixels = np.sum(mask_array > 0)
                total_pixels = mask_array.size
                white_ratio = white_pixels / total_pixels * 100 if total_pixels > 0 else 0

                original_bbox_width = original_bbox[2] - original_bbox[0]
                original_bbox_height = original_bbox[3] - original_bbox[1]
                original_face_area = original_bbox_width * original_bbox_height
                coverage_ratio = white_pixels / original_face_area if original_face_area > 0 else 0

                print(f" 👤 FACE COVERAGE: {coverage_ratio:.1%} of the original bbox")

                print(f" White pixels (region to change): {white_pixels:,} ({white_ratio:.1f}%)")
                print(f" Black pixels (region to preserve): {total_pixels-white_pixels:,} ({100-white_ratio:.1f}%)")
                print(f" Total pixels: {total_pixels:,}")

                if coverage_ratio < 0.7:
                    print(f" ⚠️ WARNING: low face coverage ({coverage_ratio:.1%})")
                elif coverage_ratio > 1.3:
                    print(f" ⚠️ WARNING: very high face coverage ({coverage_ratio:.1%})")
                elif 0.8 <= coverage_ratio <= 1.2:
                    print(f" ✅ OPTIMAL face coverage ({coverage_ratio:.1%})")

                print("#" * 80)
                print("✅ SAM 2 SEGMENTATION FINISHED")
                print(f"📐 Final mask size: {mask_512.size}")
                print(f"🎛️ Mode used: {mode}")
                print(f"👤 Crop={crop_size}×{crop_size}px, heuristic score={best_score:.3f}")
                print(f"👤 Head coverage: {coverage_ratio:.1%} of the bbox")
                print("#" * 80)

                return mask_512, raw_mask_512

            else:
                print(f"❌ Unknown mode: {mode}")
                # Return twice to keep the (mask, raw_mask) contract.
                fallback = self._create_rectangular_mask(image, bbox_coords, "focus_change")
                return fallback, fallback

        except Exception as e:
            print("❌" * 40)
            print("❌ ERROR IN SAM 2 SEGMENTATION")
            print(f"Error: {str(e)[:200]}")
            print("❌" * 40)
            import traceback
            traceback.print_exc()

            # original_image and original_bbox are initialized at the very top
            # of the try block, so they are always defined here.
            fallback_mask = self._create_rectangular_mask(original_image, original_bbox, mode)
            if fallback_mask.size != original_image.size:
                print(f" ⚠️ Fallback mask adjusted: {fallback_mask.size} → {original_image.size}")
                fallback_mask = fallback_mask.resize(original_image.size, Image.Resampling.NEAREST)

            return fallback_mask, fallback_mask
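
    # Minimal usage sketch (hedged: `pipeline` stands for whatever instance of
    # this class the surrounding application constructs; the name and the file
    # paths are illustrative assumptions, not part of this module):
    #
    #   mask, raw_mask = pipeline.create_sam_mask(
    #       image=Image.open("portrait.jpg").convert("RGB"),
    #       bbox_coords=(120, 80, 340, 360),   # x1, y1, x2, y2 around the subject
    #       mode="focus_change",               # or "environment_change" / "face_only_change"
    #   )
    #   mask.save("mask.png")          # postprocessed mask
    #   raw_mask.save("mask_raw.png")  # pre-postprocessing binary mask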