#!/usr/bin/env python3
"""
统一Prompt生成器

实现完整的prompt工程工作流程：
1. 第一帧：自动生成点提示 → SAM → 分割结果
2. 后续帧：分割结果 → 三个模块处理 → 融合 → 掩码提示 → SAM → 分割结果
3. 结果整合：所有帧的分割结果 → 视频/最优图/置信度/LVEF等
"""

import torch
import torch.nn as nn
import numpy as np
from typing import Dict, List, Tuple, Optional, Any, Union
from pathlib import Path

import sys
sys.path.append(str(Path(__file__).parent.parent.parent))
from .auto_point_prompt import AutoPointPromptGenerator
from .memory_prompt import MemoryMaskPromptGenerator
from .periodic_prompt import PeriodicPromptGenerator
from .quality_prompt import QualityAwarePromptGenerator
from .nlp_prompt import NLPromptGenerator
from .prompt_fusion import PromptFusion
from .input_prompt import InputPromptHandler
from utils.logger import get_logger


class UnifiedPromptGenerator(nn.Module):
    """Unified prompt generator.

    Implements the complete prompt-engineering workflow:

    1. First frame: auto-generated point prompts -> SAM -> segmentation mask.
    2. Subsequent frames: previous mask -> memory / periodic / quality
       modules -> fusion -> mask prompt -> SAM -> segmentation mask.
    3. Integration: all per-frame masks -> video masks / best mask /
       per-frame confidences (for downstream metrics such as LVEF).
    """

    def __init__(self,
                 input_dim: int = 256,
                 prompt_dim: int = 256,
                 enable_memory: bool = True,
                 enable_periodic: bool = True,
                 enable_quality: bool = True,
                 enable_nlp: bool = False,
                 fusion_method: str = "attention",
                 device: str = "cuda"):
        """
        Initialize the unified prompt generator.

        Args:
            input_dim: Input feature dimension.
            prompt_dim: Prompt feature dimension.
            enable_memory: Enable the memory (mask-propagation) module.
            enable_periodic: Enable the periodic (temporal) module.
            enable_quality: Enable the quality-control module.
            enable_nlp: Enable the NLP (text-to-prompt) module.
            fusion_method: Fusion strategy passed to ``PromptFusion``.
            device: Device string (e.g. "cuda" or "cpu").
        """
        super().__init__()

        self.input_dim = input_dim
        self.prompt_dim = prompt_dim
        self.device = device
        self.fusion_method = fusion_method
        self.logger = get_logger("UnifiedPromptGenerator")

        # Automatic point-prompt generator (used for the first frame).
        self.auto_point_generator = AutoPointPromptGenerator(
            input_dim=input_dim,
            prompt_dim=prompt_dim,
            device=device
        )

        # Core processing modules; each is None when disabled, and callers
        # below test for None before use.
        self.memory_module = None
        if enable_memory:
            self.memory_module = MemoryMaskPromptGenerator(
                input_dim=input_dim,
                prompt_dim=prompt_dim,
                device=device
            )

        self.periodic_module = None
        if enable_periodic:
            self.periodic_module = PeriodicPromptGenerator(
                input_dim=input_dim,
                prompt_dim=prompt_dim,
                device=device
            )

        self.quality_module = None
        if enable_quality:
            self.quality_module = QualityAwarePromptGenerator(
                input_dim=input_dim,
                prompt_dim=prompt_dim,
                device=device
            )

        # Optional NLP module (text description -> prompt).
        self.nlp_module = None
        if enable_nlp:
            self.nlp_module = NLPromptGenerator(
                input_dim=input_dim,
                prompt_dim=prompt_dim,
                device=device
            )

        # Fuses the per-module prompt outputs into one mask prompt.
        self.fusion_module = PromptFusion(
            prompt_dim=prompt_dim,
            fusion_method=fusion_method,
            device=device
        )

        # Formats user-supplied point/box/mask prompts for SAM.
        self.input_handler = InputPromptHandler(device)

        # Small MLP mapping a prompt feature vector to a [0, 1] confidence.
        self.confidence_estimator = nn.Sequential(
            nn.Linear(prompt_dim, prompt_dim // 2),
            nn.ReLU(),
            nn.Linear(prompt_dim // 2, 1),
            nn.Sigmoid()
        )

        self.logger.info(f"统一Prompt生成器初始化完成")

    def process_initial_prompt(self,
                              initial_prompt_type: str,
                              initial_prompt_data: Any,
                              image: torch.Tensor) -> Dict[str, torch.Tensor]:
        """
        Process the initial (first-frame) prompt.

        Args:
            initial_prompt_type: One of 'point', 'box', 'mask', 'nlp';
                anything else falls back to automatic point generation.
            initial_prompt_data: Prompt payload; for 'point' either a dict
                with 'coords'/'labels' or the coordinates directly; for
                'nlp' a dict with 'text' (and optional 'video_length',
                'frame_indices').
            image: Input image (B, C, H, W).

        Returns:
            A SAM-formatted prompt dictionary.
        """
        if initial_prompt_type == 'point':
            # Point prompt: format directly for SAM.
            if isinstance(initial_prompt_data, dict):
                coords = initial_prompt_data.get('coords')
                labels = initial_prompt_data.get('labels', None)
            else:
                coords = initial_prompt_data
                labels = None
            return self.input_handler.process_point_prompt(coords, labels)

        elif initial_prompt_type == 'box':
            # Box prompt: format directly for SAM.
            return self.input_handler.process_box_prompt(initial_prompt_data)

        elif initial_prompt_type == 'mask':
            # Mask prompt: format directly for SAM.
            return self.input_handler.process_mask_prompt(initial_prompt_data)

        elif initial_prompt_type == 'nlp':
            # NLP prompt: text -> coordinates -> point prompt.
            if self.nlp_module is None:
                self.logger.warning("NLP模块未启用，使用自动点提示生成器")
                return self.auto_point_generator.generate_prompts(image)

            text_description = initial_prompt_data.get('text', '')
            image_shape = (image.shape[2], image.shape[3])
            video_length = initial_prompt_data.get('video_length', 1)
            frame_indices = initial_prompt_data.get('frame_indices', None)

            nlp_prompt = self.nlp_module.generate_prompts(
                text_description, image_shape, video_length, frame_indices
            )
            return self.input_handler.format_for_sam(nlp_prompt)

        else:
            # Default: automatically generate point prompts.
            self.logger.info("使用自动点提示生成器")
            return self.auto_point_generator.generate_prompts(image)

    def process_subsequent_frame(self,
                                 current_frame: torch.Tensor,
                                 previous_mask: torch.Tensor,
                                 frame_history: List[torch.Tensor],
                                 frame_index: int) -> Dict[str, torch.Tensor]:
        """
        Process a non-first frame through the enabled modules and fuse.

        Args:
            current_frame: Current frame image (B, C, H, W).
            previous_mask: Previous frame's segmentation mask (B, 1, H, W).
            frame_history: List of earlier frames (each (B, C, H, W)).
            frame_index: Index of the current frame in the video.

        Returns:
            The fused mask prompt dictionary.
        """
        # Extract current-frame features with whichever extractor is available.
        if self.memory_module:
            current_features = self.memory_module.extract_features(current_frame)
        else:
            current_features = self.auto_point_generator.extract_features(current_frame)

        # Memory module: store the previous mask, then generate
        # memory-guided prompts.
        memory_prompts = None
        if self.memory_module:
            self.memory_module.add_to_memory(
                current_features,
                previous_mask,
                {'frame_index': frame_index}
            )
            memory_prompts = self.memory_module.generate_prompts(
                current_frame,
                {'current_mask': previous_mask}
            )

        # Periodic module: build a temporal feature sequence from the most
        # recent history frames plus the current frame.
        periodic_prompts = None
        if self.periodic_module:
            temporal_sequence = []
            for hist_frame in frame_history[-self.periodic_module.sequence_length:]:
                features = self.periodic_module.extract_features(hist_frame)
                temporal_sequence.append(features)

            current_features_periodic = self.periodic_module.extract_features(current_frame)
            temporal_sequence.append(current_features_periodic)

            if len(temporal_sequence) > 0:
                temporal_features = torch.stack(temporal_sequence, dim=1)  # (B, T, C, H, W)
                # Flatten the spatial dimensions.
                B, T, C, H, W = temporal_features.shape
                temporal_features = temporal_features.view(B, T, C * H * W)
                # Project to input_dim if needed.
                # NOTE(review): this projection is created lazily on first
                # use, so it is not visible to an optimizer constructed
                # before the first forward pass and is not saved until it
                # exists — consider pre-registering it in __init__.
                if temporal_features.shape[-1] != self.input_dim:
                    if not hasattr(self, '_temporal_proj'):
                        self._temporal_proj = nn.Linear(temporal_features.shape[-1], self.input_dim).to(self.device)
                    temporal_features = self._temporal_proj(temporal_features)

                periodic_prompts = self.periodic_module.forward(
                    current_features_periodic,
                    {'temporal_sequence': temporal_features}
                )

        # Quality-control module.
        quality_prompts = None
        if self.quality_module:
            quality_prompts = self.quality_module.generate_prompts(
                current_frame,
                {'previous_mask': previous_mask}
            )

        # Fuse the three module outputs into a single prompt.
        image_shape = (current_frame.shape[2], current_frame.shape[3])
        fused_prompt = self.fusion_module.forward(
            memory_prompts,
            periodic_prompts,
            quality_prompts,
            image_shape
        )

        return fused_prompt

    def process_video(self,
                     video: Union[List[torch.Tensor], torch.Tensor],
                     initial_prompt_type: str = 'auto',
                     initial_prompt_data: Optional[Any] = None,
                     sam_model: Optional[Any] = None) -> Dict[str, Any]:
        """
        Process an entire video frame by frame.

        Args:
            video: Frame list or tensor, (T, C, H, W) or (B, T, C, H, W)
                (only the first batch element is used for 5-D input).
            initial_prompt_type: Initial prompt type
                ('point', 'box', 'mask', 'nlp', 'auto').
            initial_prompt_data: Initial prompt payload.
            sam_model: SAM model; if provided, real segmentation is run,
                otherwise placeholder masks are produced (for testing).

        Returns:
            Dictionary with per-frame masks, confidences, best frame, etc.
            (see ``_integrate_results``).
        """
        # Normalize the input to a list of frames.
        if isinstance(video, torch.Tensor):
            if len(video.shape) == 4:
                # (T, C, H, W)
                frames = [video[i] for i in range(video.shape[0])]
            else:
                # (B, T, C, H, W): only the first batch element is processed.
                frames = [video[0, i] for i in range(video.shape[1])]
        else:
            frames = video

        num_frames = len(frames)
        self.logger.info(f"处理视频，共{num_frames}帧")

        # Ensure every frame carries a batch dimension.
        frames = [f.unsqueeze(0) if len(f.shape) == 3 else f for f in frames]

        results = []
        confidences = []
        frame_history = []

        # First frame: use the initial prompt.
        first_frame = frames[0].to(self.device)
        # `is None` check (not `or {}`): tensor prompt data would raise on
        # ambiguous boolean conversion.
        first_prompt = self.process_initial_prompt(
            initial_prompt_type,
            initial_prompt_data if initial_prompt_data is not None else {},
            first_frame
        )

        if sam_model is not None:
            first_mask = self._segment_with_sam(sam_model, first_frame, first_prompt)
        else:
            # Placeholder segmentation result (for testing without SAM).
            first_mask = torch.zeros(1, 1, first_frame.shape[2], first_frame.shape[3], device=self.device)

        results.append(first_mask)
        confidences.append(first_prompt.get('confidence', torch.tensor(0.5, device=self.device)))
        frame_history.append(first_frame)

        # Subsequent frames: generate mask prompts via the three modules.
        for frame_idx in range(1, num_frames):
            current_frame = frames[frame_idx].to(self.device)
            previous_mask = results[-1]

            mask_prompt = self.process_subsequent_frame(
                current_frame,
                previous_mask,
                frame_history,
                frame_idx
            )

            if sam_model is not None:
                current_mask = self._segment_with_sam(sam_model, current_frame, mask_prompt)
            else:
                # Placeholder segmentation result: carry the previous mask.
                current_mask = previous_mask.clone()

            results.append(current_mask)
            confidences.append(mask_prompt.get('confidence', torch.tensor(0.5, device=self.device)))
            frame_history.append(current_frame)

            # Cap the history length. BUGFIX: the original
            # `a > b if cond else 10` parsed as `(a > b) if cond else 10`,
            # so with the periodic module disabled the condition was the
            # truthy constant 10 and the slice below dereferenced None.
            max_history = (self.periodic_module.sequence_length
                           if self.periodic_module else 10)
            if len(frame_history) > max_history:
                frame_history = frame_history[-max_history:]

        # Integrate the per-frame results.
        return self._integrate_results(results, confidences, frames)

    def _segment_with_sam(self,
                         sam_model: Any,
                         image: torch.Tensor,
                         prompt: Dict[str, torch.Tensor]) -> torch.Tensor:
        """
        Run segmentation with a SAM model.

        Args:
            sam_model: SAM model interface object (SAMInterface).
            image: Input image (B, C, H, W).
            prompt: Prompt dictionary.

        Returns:
            Segmentation mask (B, 1, H, W); zeros if no usable model.
        """
        if sam_model is None:
            self.logger.warning("SAM模型未提供，返回零掩码")
            return torch.zeros(image.shape[0], 1, image.shape[2], image.shape[3], device=self.device)

        # Duck-type check for a SAMInterface-like object.
        if hasattr(sam_model, 'segment'):
            masks = []
            for i in range(image.shape[0]):
                single_image = image[i]  # (C, H, W)
                # Slice batched tensor prompt entries per sample; pass
                # scalars/1-D entries through unchanged.
                single_prompt = {k: v[i] if isinstance(v, torch.Tensor) and len(v.shape) > 1 else v
                               for k, v in prompt.items()}
                mask = sam_model.segment(single_image, single_prompt)
                if isinstance(mask, np.ndarray):
                    mask = torch.from_numpy(mask).float().to(self.device)
                masks.append(mask)

            masks_tensor = torch.stack(masks).unsqueeze(1)  # (B, 1, H, W)
            return masks_tensor
        else:
            # Unknown SAM interface: fall back to zeros rather than crash.
            self.logger.warning("SAM模型接口类型未知，返回零掩码")
            return torch.zeros(image.shape[0], 1, image.shape[2], image.shape[3], device=self.device)

    def _integrate_results(self,
                          results: List[torch.Tensor],
                          confidences: List[torch.Tensor],
                          frames: List[torch.Tensor]) -> Dict[str, Any]:
        """
        Integrate per-frame segmentation results into a summary dict.

        Args:
            results: List of per-frame masks, each (B, 1, H, W).
            confidences: List of per-frame confidences; each entry may be a
                0-d scalar, a (B,) vector, or a (B, 1) tensor.
            frames: Original frame list (unused here, kept for interface).

        Returns:
            Dict with 'video_masks' (B, T, 1, H, W), 'best_mask' (B, 1, H, W),
            'best_frame_idx', 'confidences' (B, T, 1), 'avg_confidence',
            'num_frames'.
        """
        # Stack all per-frame masks along a time dimension.
        video_masks = torch.stack(results, dim=1)  # (B, T, 1, H, W)

        # BUGFIX: confidences may be 0-d default tensors (torch.tensor(0.5)),
        # and stacking 0-d tensors with dim=1 raises. Normalize every entry
        # to (B, 1) first so the stack is always (B, T, 1).
        normalized = []
        for conf in confidences:
            c = torch.as_tensor(conf)
            if c.dim() == 0:
                c = c.view(1, 1)       # scalar -> (B=1, 1)
            elif c.dim() == 1:
                c = c.unsqueeze(-1)    # (B,) -> (B, 1)
            normalized.append(c)
        video_confidences = torch.stack(normalized, dim=1)  # (B, T, 1)

        # Select the highest-confidence frame per batch element.
        best_frame_idx = torch.argmax(video_confidences.squeeze(-1), dim=1)
        best_masks = []
        for i, idx in enumerate(best_frame_idx):
            best_masks.append(video_masks[i, idx.item()])
        best_mask = torch.stack(best_masks, dim=0)  # (B, 1, H, W)

        # Mean confidence across all frames and batch elements.
        avg_confidence = torch.mean(video_confidences)

        return {
            'video_masks': video_masks,  # full-video segmentation results
            'best_mask': best_mask,  # best single-frame mask
            'best_frame_idx': best_frame_idx,  # index of the best frame
            'confidences': video_confidences,  # per-frame confidences
            'avg_confidence': avg_confidence,  # mean confidence
            'num_frames': len(results)
        }

    def compute_confidence(self, mask: torch.Tensor) -> torch.Tensor:
        """
        Compute a confidence score for a segmentation mask.

        Args:
            mask: Segmentation mask (B, 1, H, W).

        Returns:
            Confidence score (B, 1) in [0, 1].
        """
        # Replicate the single channel to 3 so the image feature extractor
        # accepts the mask as input.
        features = self.auto_point_generator.extract_features(
            mask.repeat(1, 3, 1, 1)
        )
        global_features = torch.nn.functional.adaptive_avg_pool2d(features, (1, 1))
        global_features = global_features.view(features.shape[0], -1)
        global_features = self.auto_point_generator.input_projection(global_features)

        return self.confidence_estimator(global_features)

    def generate_initial_prompts(self,
                                 image: torch.Tensor,
                                 context: Optional[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
        """
        Generate initial prompts for the first frame.

        Args:
            image: Input image (B, C, H, W).
            context: Optional context that may carry direct prompts under
                'points', 'boxes', 'masks', or a text under 'nlp_text'.

        Returns:
            Prompt dictionary.
        """
        if context:
            # Directly supplied point/box/mask prompts take precedence.
            if 'points' in context and context['points'] is not None:
                return {'points': context['points'].to(self.device)}
            if 'boxes' in context and context['boxes'] is not None:
                return {'boxes': context['boxes'].to(self.device)}
            if 'masks' in context and context['masks'] is not None:
                return {'masks': context['masks'].to(self.device)}

            # Text prompt via the NLP module.
            # CONSISTENCY FIX: call generate_prompts with the same
            # (text, image_shape, video_length, frame_indices) signature
            # used in process_initial_prompt; the previous (image, text)
            # argument order did not match that signature.
            if 'nlp_text' in context and context['nlp_text'] is not None and self.nlp_module:
                image_shape = (image.shape[2], image.shape[3])
                nlp_prompt = self.nlp_module.generate_prompts(
                    context['nlp_text'], image_shape, 1, None
                )
                return self.input_handler.format_for_sam(nlp_prompt)

        # Default: automatic point prompt generation.
        return self.auto_point_generator.generate_prompts(image)

    def forward(self,
                image: torch.Tensor,
                context: Optional[Dict[str, Any]] = None,
                is_first_frame: bool = False) -> Dict[str, torch.Tensor]:
        """
        Forward pass.

        Args:
            image: Input image (B, C, H, W).
            context: Context dict; for subsequent frames it should carry
                'prev_masks' and may carry 'frame_history' / 'frame_idx'.
            is_first_frame: Whether this is the first frame.

        Returns:
            Prompt dictionary.
        """
        if is_first_frame:
            return self.generate_initial_prompts(image, context)
        else:
            # Subsequent frame: multi-module processing; fall back to
            # automatic point prompts if no previous mask is available.
            previous_mask = context.get('prev_masks') if context else None
            if previous_mask is None:
                self.logger.warning("后续帧缺少上一帧掩码，使用自动点提示生成器")
                return self.auto_point_generator.generate_prompts(image)

            frame_history = context.get('frame_history', []) if context else []
            frame_idx = context.get('frame_idx', torch.tensor(0)) if context else torch.tensor(0)

            return self.process_subsequent_frame(
                image,
                previous_mask,
                frame_history,
                frame_idx.item() if isinstance(frame_idx, torch.Tensor) else frame_idx
            )

    def compute_loss(self,
                    predictions: Dict[str, torch.Tensor],
                    targets: Dict[str, torch.Tensor]) -> torch.Tensor:
        """
        Compute the training loss over whichever prompt types are present.

        Args:
            predictions: Predicted prompts ('points' / 'masks' / 'boxes').
            targets: Target prompts with matching keys.

        Returns:
            Scalar loss tensor (zero, with grad, if no keys match).
        """
        total_loss = None

        # Point-prompt loss (MSE on coordinates).
        if 'points' in predictions and 'points' in targets:
            point_loss = nn.functional.mse_loss(predictions['points'], targets['points'])
            total_loss = point_loss if total_loss is None else total_loss + point_loss

        # Mask-prompt loss (BCE on logits).
        if 'masks' in predictions and 'masks' in targets:
            mask_loss = nn.functional.binary_cross_entropy_with_logits(
                predictions['masks'], targets['masks']
            )
            total_loss = mask_loss if total_loss is None else total_loss + mask_loss

        # Box-prompt loss (MSE on box coordinates).
        if 'boxes' in predictions and 'boxes' in targets:
            box_loss = nn.functional.mse_loss(predictions['boxes'], targets['boxes'])
            total_loss = box_loss if total_loss is None else total_loss + box_loss

        # No matching keys: return a zero loss that still participates in
        # autograd so callers can .backward() unconditionally.
        if total_loss is None:
            self.logger.warning("没有匹配的损失项，返回零损失")
            total_loss = torch.tensor(0.0, device=self.device, requires_grad=True)

        return total_loss

    def get_model_info(self) -> Dict[str, Any]:
        """
        Return a summary of the model configuration and parameter counts.

        Returns:
            Dict with model name, dims, enabled modules, fusion method,
            parameter counts, and device.
        """
        total_params = sum(p.numel() for p in self.parameters())
        trainable_params = sum(p.numel() for p in self.parameters() if p.requires_grad)

        enabled_modules = []
        if self.memory_module is not None:
            enabled_modules.append('memory')
        if self.periodic_module is not None:
            enabled_modules.append('periodic')
        if self.quality_module is not None:
            enabled_modules.append('quality')
        if self.nlp_module is not None:
            enabled_modules.append('nlp')

        return {
            'model_name': 'UnifiedPromptGenerator',
            'input_dim': self.input_dim,
            'prompt_dim': self.prompt_dim,
            'enabled_modules': enabled_modules,
            'fusion_method': self.fusion_method,
            'total_params': total_params,
            'trainable_params': trainable_params,
            'device': self.device
        }
