# ultra-sam-light/models/ultrasam.py
import os
from pathlib import Path
from typing import Optional, Tuple, Union

import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

# ----------------------------
# Lightweight UNet-like model
# ----------------------------
class DoubleConv(nn.Module):
    """Two stacked (3x3 conv -> BatchNorm -> ReLU) stages; spatial size is preserved."""

    def __init__(self, in_ch, out_ch):
        super().__init__()
        # First stage maps in_ch -> out_ch, second refines out_ch -> out_ch.
        layers = []
        for cin in (in_ch, out_ch):
            layers.append(nn.Conv2d(cin, out_ch, kernel_size=3, padding=1, bias=False))
            layers.append(nn.BatchNorm2d(out_ch))
            layers.append(nn.ReLU(inplace=True))
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)


class Down(nn.Module):
    """Encoder step: halve the spatial resolution with max-pooling, then DoubleConv."""

    def __init__(self, in_ch, out_ch):
        super().__init__()
        pool = nn.MaxPool2d(2)
        conv = DoubleConv(in_ch, out_ch)
        self.pool_conv = nn.Sequential(pool, conv)

    def forward(self, x):
        return self.pool_conv(x)


class Up(nn.Module):
    """Decoder step: upscale x1, pad to match the skip tensor x2, concat, DoubleConv."""

    def __init__(self, in_ch, out_ch, bilinear=True):
        super().__init__()
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            # in_ch counts the concatenated channels, so the incoming decoder
            # feature map itself has in_ch // 2 channels.
            self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, kernel_size=2, stride=2)
        self.conv = DoubleConv(in_ch, out_ch)

    def forward(self, x1, x2):
        up = self.up(x1)
        # Odd input sizes can leave the upsampled map one pixel short of the
        # skip tensor; pad symmetrically so the concat lines up.
        dy = x2.size(2) - up.size(2)
        dx = x2.size(3) - up.size(3)
        if dy or dx:
            up = F.pad(up, [dx // 2, dx - dx // 2, dy // 2, dy - dy // 2])
        return self.conv(torch.cat([x2, up], dim=1))


class UltraSAMNet(nn.Module):
    """
    A small UNet-like model for binary segmentation.
    Output: logits with 1 channel (no activation).
    """
    def __init__(self, in_channels=3, base_c=32, bilinear=True):
        super().__init__()
        c = base_c
        self.inc = DoubleConv(in_channels, c)
        self.down1 = Down(c, c * 2)
        self.down2 = Down(c * 2, c * 4)
        self.down3 = Down(c * 4, c * 8)
        self.down4 = Down(c * 8, c * 8)

        self.up1 = Up(c * 16, c * 4, bilinear)
        self.up2 = Up(c * 8, c * 2, bilinear)
        self.up3 = Up(c * 4, c, bilinear)
        self.up4 = Up(c * 2, c, bilinear)

        self.outc = nn.Conv2d(c, 1, kernel_size=1)

    def forward(self, x):
        # x: (B, C, H, W). Encoder: keep every stage output for the skips.
        skips = [self.inc(x)]
        for stage in (self.down1, self.down2, self.down3, self.down4):
            skips.append(stage(skips[-1]))

        # Decoder: walk back up, fusing the skip tensors in reverse order.
        y = skips[-1]
        for stage, skip in zip(
            (self.up1, self.up2, self.up3, self.up4),
            (skips[3], skips[2], skips[1], skips[0]),
        ):
            y = stage(y, skip)
        return self.outc(y)


# ----------------------------
# UltraSAM wrapper for inference
# ----------------------------
class UltraSAM:
    """
    Wrapper that manages a UltraSAMNet for inference.

    Handles weight loading (tolerating common checkpoint layouts), input
    normalization, and thresholding of the sigmoid output into a binary mask.

    Usage:
        us = UltraSAM(weight_path="sam_weights/UltraSam.pth", device="cuda")
        mask = us.infer("image.png")  # returns numpy (H, W) binary mask
    """

    # ImageNet mean/std used to normalize inputs — change these to match the
    # statistics the weights were actually trained with, if they differ.
    _MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32)
    _STD = np.array([0.229, 0.224, 0.225], dtype=np.float32)

    def __init__(
        self,
        weight_path: Optional[Union[str, Path]] = "sam_weights/UltraSam.pth",
        device: str = "cuda",
        input_size: Tuple[int, int] = (512, 512),
        threshold: float = 0.5,
    ):
        """
        Args:
            weight_path: checkpoint to load; None or a missing file leaves the
                model randomly initialized (a warning is printed).
            device: preferred device; silently falls back to CPU when CUDA is
                unavailable.
            input_size: (H, W) that inputs are resized to before inference.
            threshold: probability cutoff for binarizing the sigmoid output.
        """
        self.input_size = tuple(input_size)
        self.threshold = float(threshold)
        self.device = torch.device(device if torch.cuda.is_available() else "cpu")

        # instantiate model (inference only — eval mode, no grad tracking needed)
        self.model = UltraSAMNet(in_channels=3, base_c=32)
        self.model.to(self.device)
        self.model.eval()

        # try to load weights if provided
        self.weight_path = None
        if weight_path is not None:
            self.weight_path = Path(weight_path)
            if self.weight_path.exists():
                self._load_weights(self.weight_path)
            else:
                print(f"⚠️ UltraSAM: weight file not found at {self.weight_path}. Using randomly initialized weights.")
        else:
            print("⚠️ UltraSAM: no weight_path provided. Using randomly initialized weights.")

    def _load_weights(self, ckpt_path: Path):
        """Load a checkpoint, accepting plain state dicts, 'state_dict' wrappers,
        and DataParallel 'module.'-prefixed keys. Never raises; prints status."""
        try:
            ckpt = torch.load(str(ckpt_path), map_location=self.device)
            # common patterns: {"state_dict": {...}} wrapper or a bare state dict
            state = ckpt["state_dict"] if isinstance(ckpt, dict) and "state_dict" in ckpt else ckpt

            # remove 'module.' prefix if present (DataParallel)
            state = {
                (k[len("module."):] if k.startswith("module.") else k): v
                for k, v in state.items()
            }

            # Attempt strict load first, fallback to non-strict
            try:
                self.model.load_state_dict(state, strict=True)
                print(f"✅ Weights loaded (strict) from {ckpt_path}")
            except Exception as e:
                self.model.load_state_dict(state, strict=False)
                print(f"⚠️ Weights loaded (non-strict) from {ckpt_path}. Note: {e}")
        except Exception as e:
            print(f"❌ Failed to load weights from {ckpt_path}: {e}")

    # ----------------------------
    # Preprocess / Postprocess
    # ----------------------------
    def _normalize_array(self, rgb: np.ndarray) -> torch.Tensor:
        """Resize an (H, W, 3) RGB array to input_size, scale to [0, 1],
        apply mean/std normalization, and return a (1, 3, H, W) tensor on
        self.device. Shared by the path and ndarray input branches."""
        resized = cv2.resize(rgb, (self.input_size[1], self.input_size[0]))
        arr = (resized.astype(np.float32) / 255.0 - self._MEAN) / self._STD
        tensor = torch.from_numpy(arr).permute(2, 0, 1).unsqueeze(0)  # (1,3,H,W)
        return tensor.to(self.device)

    def _preprocess_image(self, image_path: str) -> torch.Tensor:
        """Read an image from disk (cv2, BGR), convert to RGB, and normalize.

        Raises:
            ValueError: if the file cannot be read as an image.
        """
        img = cv2.imread(image_path, cv2.IMREAD_COLOR)
        if img is None:
            raise ValueError(f"UltraSAM: cannot read image: {image_path}")
        return self._normalize_array(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

    def _postprocess_mask(self, logits: torch.Tensor) -> np.ndarray:
        """
        logits: (1,1,H,W) or (1,H,W)
        returns binary mask uint8 (H, W) with values {0,255}
        """
        with torch.no_grad():
            probs = torch.sigmoid(logits)
            probs = probs.squeeze(0).squeeze(0)  # drop batch + channel -> (H, W)
            return (probs > self.threshold).cpu().numpy().astype(np.uint8) * 255

    # ----------------------------
    # Public API
    # ----------------------------
    def infer(self, image_or_path: Union[str, np.ndarray, torch.Tensor], save_path: Optional[Union[str, Path]] = None) -> np.ndarray:
        """
        Perform inference.

        Args:
            image_or_path: path to image (str or Path), numpy array (H,W),
                (H,W,3) or (H,W,4) assumed BGR/BGRA as produced by cv2.imread,
                or an already-preprocessed torch.Tensor (1,3,H,W)
            save_path: optional path to save mask image (PNG)

        Returns:
            mask: numpy array (H, W) uint8 with values 0 or 255
        """
        orig_path = None
        # prepare tensor — Path inputs are now accepted alongside plain strings
        if isinstance(image_or_path, (str, Path)):
            tensor = self._preprocess_image(str(image_or_path))
            orig_path = image_or_path
        elif isinstance(image_or_path, np.ndarray):
            img = image_or_path
            if img.ndim == 2:
                img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
            elif img.shape[2] == 4:
                # BUGFIX: drop the alpha channel — a 4-channel array previously
                # fell through and broke the 3-channel mean/std broadcast.
                img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)
            elif img.shape[2] == 3:
                # assume cv2-style BGR ordering, convert to RGB
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            tensor = self._normalize_array(img)
        elif isinstance(image_or_path, torch.Tensor):
            tensor = image_or_path.to(self.device)
        else:
            raise ValueError("UltraSAM.infer: unsupported input type")

        # inference
        with torch.no_grad():
            logits = self.model(tensor)  # expect (1,1,H,W)
            if logits.ndim == 3:
                # (B,H,W) -> add a channel dim
                logits = logits.unsqueeze(1)
            elif logits.ndim == 4 and logits.shape[1] != 1:
                # multi-channel output: keep channel 0
                logits = logits[:, :1, :, :]

        mask = self._postprocess_mask(logits)  # (H, W) uint8 0/255

        # optionally save; with no explicit save_path, path inputs get a
        # default location ./output/<basename>_mask.png
        if save_path is not None:
            save_path = Path(save_path)
            save_path.parent.mkdir(parents=True, exist_ok=True)
            cv2.imwrite(str(save_path), mask)
            print(f"UltraSAM: saved mask -> {save_path}")
        elif orig_path is not None:
            out_dir = Path("output")
            out_dir.mkdir(parents=True, exist_ok=True)
            out_path = out_dir / (Path(orig_path).stem + "_mask.png")
            cv2.imwrite(str(out_path), mask)
            print(f"UltraSAM: saved mask -> {out_path}")

        return mask


# If run as script, demonstrate usage
if __name__ == "__main__":
    # CLI demo: segment a single image and report the mask shape.
    import argparse

    cli = argparse.ArgumentParser()
    cli.add_argument("--image", type=str, required=True, help="Path to input image")
    cli.add_argument("--weights", type=str, default="sam_weights/UltraSam.pth", help="Path to weights .pth (optional)")
    cli.add_argument("--out", type=str, default=None, help="Optional output mask path")
    cli.add_argument("--size", type=int, nargs=2, default=[512,512], help="Input size H W")
    opts = cli.parse_args()

    segmenter = UltraSAM(weight_path=opts.weights, device="cuda", input_size=tuple(opts.size))
    result = segmenter.infer(opts.image, save_path=opts.out)
    print("Done. mask shape:", result.shape)
