eho69 committed on
Commit
bb46a33
·
verified ·
1 Parent(s): 70f94d5
Files changed (1) hide show
  1. app.py +122 -0
app.py CHANGED
@@ -347,6 +347,91 @@ def apply_thresholding(image, method, threshold_value, max_value):
347
  return result_rgb, info
348
 
349
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
350
  def analyze_image_stats(image):
351
  """Provide detailed statistical analysis"""
352
  if isinstance(image, Image.Image):
@@ -587,6 +672,34 @@ with gr.Blocks(title="Image Preprocessing Analyzer", theme=gr.themes.Soft()) as
587
  inputs=[input_image, thresh_method, thresh_value, thresh_max],
588
  outputs=[thresh_output, thresh_info]
589
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
590
 
591
  # Documentation
592
  with gr.Accordion("📚 Filter Documentation", open=False):
@@ -637,6 +750,15 @@ with gr.Blocks(title="Image Preprocessing Analyzer", theme=gr.themes.Soft()) as
637
  - **Binary**: Simple cutoff threshold
638
  - **Otsu**: Automatically finds optimal threshold
639
  - **Adaptive**: Different thresholds for different regions
 
 
 
 
 
 
 
 
 
640
  """)
641
 
642
  if __name__ == "__main__":
 
347
  return result_rgb, info
348
 
349
 
350
def apply_grabcut(image, margin_percent, iterations):
    """
    Apply the GrabCut algorithm for background subtraction.

    GrabCut is an interactive foreground-extraction algorithm that uses
    graph cuts and Gaussian Mixture Models (GMM) to separate foreground
    from background.

    Parameters
    ----------
    image : PIL.Image.Image or np.ndarray
        Input image. Grayscale, RGB, and RGBA inputs are accepted;
        everything is normalized to 8-bit RGB before processing.
    margin_percent : float
        Margin from each edge (percent of the image dimension) defining
        the initial foreground rectangle.
    iterations : int or float
        Number of GrabCut refinement iterations (Gradio sliders may
        deliver floats; the value is cast to int).

    Returns
    -------
    tuple
        (labeled comparison image, extracted foreground image, info markdown)
    """
    if isinstance(image, Image.Image):
        image = np.array(image)

    # cv2.grabCut requires an 8-bit 3-channel image: expand grayscale and
    # drop an alpha channel if present (RGBA would also break the hstack
    # comparison view below).
    if len(image.shape) == 2:
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    elif image.shape[2] == 4:
        image = image[:, :, :3]

    # Work on a contiguous uint8 copy so the original stays untouched.
    img = np.ascontiguousarray(image, dtype=np.uint8)
    h, w = img.shape[:2]

    # Mask semantics: 0 = background, 1 = foreground,
    # 2 = probably background, 3 = probably foreground.
    mask = np.zeros(img.shape[:2], np.uint8)

    # Internal model buffers required by cv2.grabCut (fixed 1x65 float64).
    bgd_model = np.zeros((1, 65), np.float64)
    fgd_model = np.zeros((1, 65), np.float64)

    # Rectangle around the presumed object, inset by the requested margin.
    # Clamp margins to at least 1 px so tiny images still leave some
    # definite-background border (a full-image rect makes grabCut raise).
    margin_h = max(1, int(h * margin_percent / 100))
    margin_w = max(1, int(w * margin_percent / 100))
    rect = (margin_w, margin_h, w - 2 * margin_w, h - 2 * margin_h)

    # Apply GrabCut. iterCount must be an int: more iterations = more
    # accurate but slower.
    cv2.grabCut(img, mask, rect, bgd_model, fgd_model, int(iterations),
                cv2.GC_INIT_WITH_RECT)

    # Collapse to binary: definite/probable foreground (1 or 3) -> 1,
    # definite/probable background (0 or 2) -> 0.
    mask_binary = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')

    # Zero out background pixels to extract the foreground.
    foreground = img * mask_binary[:, :, np.newaxis]

    # Visualize the binary mask as a grayscale RGB image.
    mask_vis = mask_binary * 255
    mask_rgb = cv2.cvtColor(mask_vis, cv2.COLOR_GRAY2RGB)

    # Build a side-by-side comparison: Original | Mask | Foreground,
    # downscaled so the three panels fit comfortably.
    scale = 0.33
    h_new, w_new = int(h * scale), int(w * scale)

    original_small = cv2.resize(img, (w_new, h_new))
    mask_small = cv2.resize(mask_rgb, (w_new, h_new))
    foreground_small = cv2.resize(foreground, (w_new, h_new))

    combined = np.hstack([original_small, mask_small, foreground_small])

    # Prepend a black header strip and write one label above each panel.
    label_height = 30
    labeled_img = np.zeros((combined.shape[0] + label_height, combined.shape[1], 3), dtype=np.uint8)
    labeled_img[label_height:, :] = combined

    cv2.putText(labeled_img, "Original", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
    cv2.putText(labeled_img, "Mask", (w_new + 10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
    cv2.putText(labeled_img, "Foreground", (2*w_new + 10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

    info = f"""
    ### GrabCut Background Subtraction Applied
    - **Margin**: {margin_percent}% from edges
    - **Iterations**: {iterations}
    - **Algorithm**: Graph cuts with Gaussian Mixture Models
    - **Output**: Shows Original | Mask | Extracted Foreground
    - **Use Case**: Object extraction, background removal, photo editing

    **How it works**:
    1. Rectangle defines initial foreground region (inside margins)
    2. GMM models learn foreground/background color distributions
    3. Graph cuts optimize the boundary between them
    4. White mask = foreground, Black = background
    """

    return labeled_img, foreground, info
435
  def analyze_image_stats(image):
436
  """Provide detailed statistical analysis"""
437
  if isinstance(image, Image.Image):
 
672
  inputs=[input_image, thresh_method, thresh_value, thresh_max],
673
  outputs=[thresh_output, thresh_info]
674
  )
675
        # GrabCut Background Subtraction Tab: UI controls plus the click
        # wiring that routes the shared input_image through apply_grabcut.
        with gr.TabItem("✂️ Background Subtraction (GrabCut)"):
            gr.Markdown("""
            **GrabCut** is an advanced algorithm for extracting foreground objects from images.
            It uses graph cuts and Gaussian Mixture Models to intelligently separate foreground from background.

            Perfect for: Product photography, portrait backgrounds, object isolation
            """)

            # Algorithm parameters: initial-rectangle margin and iteration count.
            with gr.Row():
                grabcut_margin = gr.Slider(5, 25, value=10, step=1, label="Margin from Edges (%)")
                grabcut_iter = gr.Slider(1, 10, value=5, step=1, label="Iterations")

            grabcut_btn = gr.Button("Extract Foreground", variant="primary")

            # Three-panel comparison image (Original | Mask | Foreground).
            with gr.Row():
                grabcut_output = gr.Image(label="Comparison View (Original | Mask | Foreground)")

            # Full-resolution extracted foreground plus the info markdown.
            with gr.Row():
                grabcut_foreground = gr.Image(label="Extracted Foreground (Full Size)")
                grabcut_info = gr.Markdown()

            # NOTE(review): input_image is defined elsewhere in the app and
            # shared across tabs — presumably the global upload component.
            grabcut_btn.click(
                fn=apply_grabcut,
                inputs=[input_image, grabcut_margin, grabcut_iter],
                outputs=[grabcut_output, grabcut_foreground, grabcut_info]
            )
703
 
704
  # Documentation
705
  with gr.Accordion("📚 Filter Documentation", open=False):
 
750
  - **Binary**: Simple cutoff threshold
751
  - **Otsu**: Automatically finds optimal threshold
752
  - **Adaptive**: Different thresholds for different regions
753
+
754
+ #### GrabCut Background Subtraction
755
+ - **Purpose**: Extract foreground objects from images
756
+ - **How it works**: Uses graph cuts and Gaussian Mixture Models (GMM)
757
+ - **Margin**: Defines initial foreground region (rectangle inside margins)
758
+ - **Iterations**: More iterations = more accurate segmentation (but slower)
759
+ - **Use Case**: Product photography, portrait background removal, object isolation
760
+ - **Algorithm**: Iteratively learns color distributions of foreground/background
761
+ - **Output**: Binary mask and extracted foreground object
762
  """)
763
 
764
  if __name__ == "__main__":