"""
Advanced Segmentation using Segment Anything Model (SAM)
Optional enhancement for better object segmentation
"""

import os
from typing import Dict, List, Optional

import cv2
import numpy as np
import torch


class SAMSegmentation:
    """
    Wrapper for Meta's Segment Anything Model (SAM).

    Provides automatic whole-image segmentation (via
    ``SamAutomaticMaskGenerator``) and point-prompted segmentation
    (via ``SamPredictor``).
    """

    def __init__(self, checkpoint_path: str, model_type: str = "vit_h"):
        """
        Initialize SAM model.

        Args:
            checkpoint_path: Path to SAM model checkpoint
            model_type: Model type ('vit_h', 'vit_l', 'vit_b')

        Raises:
            ImportError: If the segment-anything package is not installed.
        """
        try:
            from segment_anything import (
                sam_model_registry,
                SamPredictor,
                SamAutomaticMaskGenerator,
            )
            self.SamPredictor = SamPredictor
            self.SamAutomaticMaskGenerator = SamAutomaticMaskGenerator
            self.sam_model_registry = sam_model_registry
        except ImportError:
            print("Error: segment-anything not installed")
            print("Install with: pip install git+https://github.com/facebookresearch/segment-anything.git")
            raise
        
        self.model_type = model_type
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        
        # Load model weights and move them to the selected device
        print(f"Loading SAM model from {checkpoint_path}...")
        self.sam = self.sam_model_registry[model_type](checkpoint=checkpoint_path)
        self.sam.to(device=self.device)
        self.predictor = self.SamPredictor(self.sam)
        
        # BUG FIX: SamPredictor has no `automatic_mask_generation` method.
        # Automatic segmentation is provided by the separate
        # SamAutomaticMaskGenerator class, configured here once so repeated
        # segment() calls reuse it.
        self.mask_generator = self.SamAutomaticMaskGenerator(
            self.sam,
            points_per_side=32,
            pred_iou_thresh=0.88,
            stability_score_thresh=0.95,
            crop_n_layers=1,
            crop_n_points_downscale_factor=2,
            min_mask_region_area=100,
        )
        
        print(f"SAM model loaded successfully on {self.device}")
    
    def segment(self, rgb_image: np.ndarray) -> Dict:
        """
        Perform automatic segmentation on an RGB image.

        Args:
            rgb_image: (H, W, 3) RGB image (uint8)

        Returns:
            Dictionary containing:
            - masks: list of mask records as returned by
              ``SamAutomaticMaskGenerator.generate`` (each record has a
              boolean 'segmentation' array plus quality metadata)
            - scores: list of predicted IoU scores, one per mask
            - seg: (H, W) int32 label map (0 = background, 1..N = objects)
        """
        # generate() returns a list of dicts, one per detected mask
        # (not a (masks, scores, logits) tuple).
        masks = self.mask_generator.generate(rgb_image)
        scores = [m["predicted_iou"] for m in masks]
        
        # Flatten individual boolean masks into a single label image.
        # Where masks overlap, later masks overwrite earlier ones.
        seg_mask = np.zeros(rgb_image.shape[:2], dtype=np.int32)
        for idx, mask_info in enumerate(masks):
            seg_mask[mask_info["segmentation"]] = idx + 1
        
        return {
            "masks": masks,
            "scores": scores,
            "seg": seg_mask
        }
    
    def segment_from_points(self, rgb_image: np.ndarray, points: List[List[int]]):
        """
        Segment objects from specific prompt points.

        Args:
            rgb_image: (H, W, 3) RGB image
            points: List of [x, y] pixel coordinates, all treated as
                foreground prompts

        Returns:
            Dictionary containing:
            - masks: per-prompt candidate masks (multimask output)
            - scores: per-mask quality scores
        """
        self.predictor.set_image(rgb_image)
        
        input_points = np.array(points)
        # Label 1 marks each point as a foreground prompt; SAM expects
        # integer labels, so pin the dtype rather than passing floats.
        input_labels = np.ones(len(points), dtype=np.int32)
        
        masks, scores, logits = self.predictor.predict(
            point_coords=input_points,
            point_labels=input_labels,
            multimask_output=True,
        )
        
        return {
            "masks": masks,
            "scores": scores
        }


class SimpleSegmentation:
    """
    Simple segmentation based on depth and color.
    Fallback option if SAM is not available.
    """
    
    @staticmethod
    def segment_by_depth_color(depth: np.ndarray, rgb: np.ndarray,
                               depth_min: float = 0.3,
                               depth_max: float = 2.0) -> np.ndarray:
        """
        Segment objects using depth and color information.

        Args:
            depth: (H, W) depth image (same units as depth_min/depth_max;
                presumably meters — confirm against the camera driver)
            rgb: (H, W, 3) RGB image
            depth_min: Lower bound of the valid depth band (default keeps
                the previous hard-coded 0.3)
            depth_max: Upper bound of the valid depth band (default keeps
                the previous hard-coded 2.0)

        Returns:
            (H, W) int32 label map (0 = background, 1..N = regions)
        """
        # Keep only pixels inside the valid depth band
        depth_mask = np.logical_and(depth > depth_min, depth < depth_max)
        
        # Otsu threshold on grayscale separates foreground from background
        # by intensity without a manually tuned threshold
        gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
        _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        
        # A pixel is foreground only when both cues agree
        combined_mask = np.logical_and(depth_mask, binary > 0)
        
        # Give every connected foreground region a unique positive id
        num_labels, labels = cv2.connectedComponents(combined_mask.astype(np.uint8))
        
        return labels.astype(np.int32)
    
    @staticmethod
    def segment_by_clustering(rgb: np.ndarray, n_clusters: int = 3) -> np.ndarray:
        """
        Segment objects using K-means clustering on pixel colors.

        Args:
            rgb: (H, W, 3) RGB image
            n_clusters: Number of clusters

        Returns:
            (H, W) int32 label map where 0 marks the largest cluster
            (assumed to be background) and the remaining clusters keep
            distinct labels in 1..n_clusters.
        """
        from sklearn.cluster import KMeans
        
        # Cluster on raw RGB values: reshape to (N, 3) pixel list
        pixels = rgb.reshape(-1, 3)
        kmeans = KMeans(n_clusters=n_clusters, random_state=0, n_init=10)
        labels = kmeans.fit_predict(pixels)
        mask = labels.reshape(rgb.shape[:2])
        
        # BUG FIX: the old code zeroed the largest cluster in place, which
        # merged it with any cluster that already carried label 0.  Shift
        # all labels up by one first so 0 is free, then zero out the
        # largest cluster (assumed to be background).
        unique, counts = np.unique(mask, return_counts=True)
        largest_cluster = unique[np.argmax(counts)]
        mask = mask + 1
        mask[mask == largest_cluster + 1] = 0
        
        return mask.astype(np.int32)


def test_sam_segmentation():
    """
    Smoke-test SAM segmentation on a local image, if available.

    Requires the SAM checkpoint and a test image in the working
    directory; prints download/usage hints and returns early when
    either is missing.
    """
    try:
        checkpoint_path = "sam_vit_h_4b8939.pth"
        
        # Uses the module-level `import os` (previously `os` was only
        # imported inside the __main__ guard, so calling this function
        # from another module raised NameError).
        if not os.path.exists(checkpoint_path):
            print(f"Checkpoint not found: {checkpoint_path}")
            print("Download from: https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth")
            return
        
        sam = SAMSegmentation(checkpoint_path)
        
        # OpenCV loads BGR; convert to the RGB layout SAM expects
        test_image = cv2.imread("test_rgb.jpg")
        if test_image is None:
            print("Test image not found")
            return
        test_image = cv2.cvtColor(test_image, cv2.COLOR_BGR2RGB)
        
        result = sam.segment(test_image)
        print(f"Found {len(result['masks'])} objects")
        
    except Exception as e:
        # Demo helper: report the failure instead of crashing the caller
        print(f"Error: {e}")


# Script entry point: run the SAM smoke test when executed directly.
if __name__ == "__main__":
    import os  # `os.path` is used by test_sam_segmentation
    test_sam_segmentation()

