#!/usr/bin/env python3
"""
LoRA SAM实现

基于LoRA的SAM模型训练实现
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Dict, Any, Optional, Tuple
import sys
from pathlib import Path

# Make the vendored SAM sources importable (external/segment-anything).
project_root = Path(__file__).parent.parent.parent.parent
sam_path = project_root / "external" / "segment-anything"
sys.path.insert(0, str(sam_path))

# SAM is treated as an optional dependency: on ImportError the names are
# bound to None so this module still imports; SAMLoRA.__init__ checks for
# the None placeholders and raises a proper ImportError at construction.
try:
    from segment_anything import sam_model_registry, SamPredictor
    from segment_anything.modeling import Sam
except ImportError:
    print("警告: 无法导入SAM模块，请先运行setup_gpu_environment.py")
    Sam = None
    sam_model_registry = None
    SamPredictor = None

from peft import get_peft_model, LoraConfig
from .lora_config import LoRAConfig
from ...utils.logger import get_logger


class SAMLoRA(nn.Module):
    """SAM wrapped with LoRA adapters for parameter-efficient fine-tuning.

    LoRA adapters (via PEFT) are injected into SAM's image encoder; the
    encoder's base weights are frozen while the prompt encoder and mask
    decoder remain fully trainable.
    """

    def __init__(self,
                 base_model: str = "sam_vit_b",
                 checkpoint_path: Optional[str] = None,
                 lora_config: Optional[Dict[str, Any]] = None,
                 device: str = "cuda"):
        """Initialize the LoRA SAM model.

        Args:
            base_model: Key into ``sam_model_registry`` (e.g. ``"sam_vit_b"``).
            checkpoint_path: Optional path to a SAM checkpoint file.
            lora_config: LoRA configuration, either a ``LoRAConfig`` instance
                or a plain dict accepted by ``LoRAConfig.from_dict``.
                Defaults to ``LoRAConfig()``.
            device: Device identifier the model is moved to.

        Raises:
            ImportError: If the segment-anything package is unavailable.
        """
        super().__init__()

        self.logger = get_logger("SAMLoRA")
        self.device = device
        self.base_model = base_model

        # Fail fast if the optional SAM dependency could not be imported
        # (the module-level guard bound these names to None).
        if Sam is None or sam_model_registry is None:
            raise ImportError("SAM模块未正确安装，请运行setup_gpu_environment.py")

        self.sam = sam_model_registry[base_model](checkpoint=checkpoint_path)
        self.sam.to(device)

        # Normalize the LoRA configuration to a LoRAConfig instance.
        if lora_config is None:
            lora_config = LoRAConfig()
        elif isinstance(lora_config, dict):
            lora_config = LoRAConfig.from_dict(lora_config)

        self.lora_config = lora_config
        self.peft_config = lora_config.to_peft_config()

        # Inject LoRA adapters into the image encoder only.
        self.sam.image_encoder = get_peft_model(
            self.sam.image_encoder,
            self.peft_config,
        )

        # Freeze everything except the LoRA adapters, prompt encoder
        # and mask decoder.
        self._freeze_non_lora_params()

        self.logger.info(f"LoRA SAM模型初始化完成: {base_model}")
        self.logger.info(f"LoRA配置: r={lora_config.r}, alpha={lora_config.lora_alpha}")

    def _freeze_non_lora_params(self):
        """Freeze all non-LoRA parameters, then re-enable gradients for the
        prompt encoder and mask decoder (they carry no LoRA adapters but
        must still be trained)."""
        for name, param in self.sam.named_parameters():
            param.requires_grad = 'lora' in name.lower()

        for param in self.sam.prompt_encoder.parameters():
            param.requires_grad = True
        for param in self.sam.mask_decoder.parameters():
            param.requires_grad = True

    def forward(self,
                batched_input: Dict[str, Any],
                multimask_output: bool = False) -> Dict[str, torch.Tensor]:
        """Run SAM end to end on an already-batched input.

        Args:
            batched_input: Dict with key ``"image"`` (batched image tensor,
                assumed already preprocessed to SAM's expected resolution —
                TODO confirm against the data pipeline) and optional prompt
                keys ``"point_coords"``, ``"point_labels"``, ``"boxes"``
                and ``"mask_inputs"``.
            multimask_output: Whether the mask decoder emits multiple
                candidate masks per prompt.

        Returns:
            Dict with ``"masks"`` (logits upsampled to the input spatial
            size), ``"iou_predictions"`` and ``"low_res_logits"``.
        """
        input_images = batched_input["image"]
        input_size = input_images.shape[-2:]

        # Heavy ViT pass: this is where the LoRA adapters live.
        image_embeddings = self.sam.image_encoder(input_images)

        points = batched_input.get("point_coords", None)
        labels = batched_input.get("point_labels", None)
        boxes = batched_input.get("boxes", None)
        mask_inputs = batched_input.get("mask_inputs", None)

        # Point prompts must be passed as a (coords, labels) pair or None.
        sparse_embeddings, dense_embeddings = self.sam.prompt_encoder(
            points=(points, labels) if points is not None else None,
            boxes=boxes,
            masks=mask_inputs,
        )

        low_res_masks, iou_predictions = self.sam.mask_decoder(
            image_embeddings=image_embeddings,
            image_pe=self.sam.prompt_encoder.get_dense_pe(),
            sparse_prompt_embeddings=sparse_embeddings,
            dense_prompt_embeddings=dense_embeddings,
            multimask_output=multimask_output,
        )

        # Upsample the low-resolution mask logits back to the input size.
        masks = F.interpolate(
            low_res_masks,
            size=input_size,
            mode="bilinear",
            align_corners=False,
        )

        return {
            "masks": masks,
            "iou_predictions": iou_predictions,
            "low_res_logits": low_res_masks,
        }

    def training_step(self, batch: Dict[str, Any]) -> torch.Tensor:
        """Run one training step (forward + loss).

        Args:
            batch: Training batch in the ``forward`` input format, plus a
                ``"masks"`` key with the ground-truth masks.

        Returns:
            Scalar loss tensor (with grad).
        """
        outputs = self.forward(batch, multimask_output=True)
        return self.compute_loss(outputs, batch)

    def validation_step(self, batch: Dict[str, Any]) -> torch.Tensor:
        """Run one validation step under ``torch.no_grad()``.

        Args:
            batch: Validation batch in the same format as ``training_step``.

        Returns:
            Scalar loss tensor (no grad).
        """
        with torch.no_grad():
            outputs = self.forward(batch, multimask_output=True)
            loss = self.compute_loss(outputs, batch)
        return loss

    def compute_loss(self,
                    outputs: Dict[str, torch.Tensor],
                    batch: Dict[str, Any]) -> torch.Tensor:
        """Combine mask (Dice) loss and IoU-regression (MSE) loss.

        Args:
            outputs: ``forward`` output dict (``"masks"``, ``"iou_predictions"``).
            batch: Batch dict; ``batch["masks"]`` holds the ground truth.
                Must be broadcastable against the predicted masks — TODO
                confirm gt shape when multimask_output=True.

        Returns:
            Scalar total loss.
        """
        masks_pred = outputs["masks"]
        iou_predictions = outputs["iou_predictions"]
        masks_gt = batch["masks"]

        # Dice loss on the mask logits.
        mask_loss = self.dice_loss(masks_pred, masks_gt)

        # Regress the decoder's IoU head against the actual per-mask IoU.
        # compute_iou is non-differentiable (thresholding), so no gradient
        # flows through the target.
        iou_loss = F.mse_loss(iou_predictions, self.compute_iou(masks_pred, masks_gt))

        return mask_loss + iou_loss

    def dice_loss(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """Soft Dice loss on sigmoid-activated logits.

        Args:
            pred: Mask logits of shape (batch, num_masks, H, W).
            target: Ground-truth masks broadcastable to ``pred``.

        Returns:
            Scalar loss in [0, 1] (0 = perfect overlap).
        """
        pred = torch.sigmoid(pred)
        smooth = 1e-5  # avoids 0/0 on empty masks

        intersection = (pred * target).sum(dim=(2, 3))
        union = pred.sum(dim=(2, 3)) + target.sum(dim=(2, 3))

        dice = (2.0 * intersection + smooth) / (union + smooth)
        return 1.0 - dice.mean()

    def compute_iou(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """Hard (thresholded) per-mask IoU between predictions and targets.

        Args:
            pred: Mask logits of shape (batch, num_masks, H, W).
            target: Ground-truth masks broadcastable to ``pred``.

        Returns:
            Float tensor of shape (batch, num_masks) — same shape as the
            decoder's ``iou_predictions``, so it can serve as MSE target.
        """
        pred_bin = torch.sigmoid(pred) > 0.5
        target_bin = target > 0.5

        # Cast the integer counts to float so the MSE target has a float
        # dtype matching iou_predictions.
        intersection = (pred_bin & target_bin).sum(dim=(2, 3)).float()
        union = (pred_bin | target_bin).sum(dim=(2, 3)).float()

        # Bug fix: keep the per-mask IoU of shape (batch, num_masks). The
        # previous .mean(dim=1) collapsed it to (batch,), which cannot
        # broadcast against iou_predictions of shape (batch, num_masks)
        # in F.mse_loss.
        return intersection / (union + 1e-8)

    def predict(self,
               image: torch.Tensor,
               points: Optional[torch.Tensor] = None,
               labels: Optional[torch.Tensor] = None,
               boxes: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
        """Inference entry point: predict masks and pick the best per image.

        Args:
            image: Input image tensor, (C, H, W) or (B, C, H, W).
            points: Optional point prompt coordinates.
            labels: Optional point prompt labels.
            boxes: Optional box prompts.

        Returns:
            Dict with the best mask per image (``"masks"``), its score
            (``"iou_scores"``), and all candidates (``"all_masks"``,
            ``"all_iou_scores"``).
        """
        self.eval()

        with torch.no_grad():
            batched_input = {
                "image": image.unsqueeze(0) if image.dim() == 3 else image,
            }

            if points is not None:
                batched_input["point_coords"] = points
            if labels is not None:
                batched_input["point_labels"] = labels
            if boxes is not None:
                batched_input["boxes"] = boxes

            outputs = self.forward(batched_input, multimask_output=True)

            masks = outputs["masks"]
            iou_scores = outputs["iou_predictions"]

            # Select the highest-scoring mask per image. Bug fix: build the
            # batch index on the same device as the masks (the original CPU
            # arange fails against CUDA tensors).
            best_idx = iou_scores.argmax(dim=1)
            batch_idx = torch.arange(masks.size(0), device=masks.device)
            best_masks = masks[batch_idx, best_idx]

            return {
                "masks": best_masks,
                "iou_scores": iou_scores[batch_idx, best_idx],
                "all_masks": masks,
                "all_iou_scores": iou_scores
            }

    def save_lora_weights(self, save_path: str):
        """Save only the trainable LoRA parameters to ``save_path``.

        Args:
            save_path: Destination file for ``torch.save``.
        """
        lora_weights = {}

        for name, param in self.sam.named_parameters():
            if 'lora' in name.lower() and param.requires_grad:
                # Bug fix: detach before moving to CPU so the saved tensor
                # is a plain leaf without autograd state.
                lora_weights[name] = param.detach().cpu()

        torch.save(lora_weights, save_path)
        self.logger.info(f"LoRA权重已保存: {save_path}")

    def load_lora_weights(self, load_path: str):
        """Load LoRA parameters saved by :meth:`save_lora_weights`.

        Args:
            load_path: Checkpoint file path; tensors are mapped onto
                ``self.device``.
        """
        lora_weights = torch.load(load_path, map_location=self.device)

        # Copy matching tensors in place; unmatched names are ignored.
        for name, param in self.sam.named_parameters():
            if name in lora_weights:
                param.data = lora_weights[name].to(self.device)

        self.logger.info(f"LoRA权重已加载: {load_path}")

    def get_trainable_parameters(self) -> Tuple[int, int]:
        """Count trainable and total parameters.

        Returns:
            ``(trainable_params, total_params)`` element counts.
            (Bug fix: the annotation previously claimed ``int`` although a
            2-tuple was returned.)
        """
        trainable_params = 0
        total_params = 0

        for param in self.parameters():
            total_params += param.numel()
            if param.requires_grad:
                trainable_params += param.numel()

        return trainable_params, total_params
