#!/usr/bin/env python3
"""
记忆掩码提示生成器

基于历史分割结果的记忆掩码提示模块，利用之前的分割信息指导当前分割
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from typing import Dict, List, Tuple, Optional, Any
from collections import deque

import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent.parent))
from .base_prompt import BasePromptGenerator
from utils.logger import get_logger


class MemoryMaskPromptGenerator(BasePromptGenerator):
    """Memory-mask prompt generator.

    Maintains a bounded FIFO memory bank of past segmentation results
    (encoded masks + pooled image features) and uses similarity-weighted
    retrieval from that bank to guide prompt generation for the current
    image.
    """

    def __init__(self,
                 input_dim: int = 256,
                 prompt_dim: int = 256,
                 memory_size: int = 10,
                 memory_dim: int = 128,
                 device: str = "cuda"):
        """
        Initialize the memory-mask prompt generator.

        Args:
            input_dim: Dimensionality of the input image features.
            prompt_dim: Dimensionality of the generated prompt features.
            memory_size: Maximum number of entries kept in the memory bank.
            memory_dim: Dimensionality of the encoded memory features.
            device: Device string (e.g. "cuda" or "cpu").
        """
        super().__init__(input_dim, prompt_dim, device)

        self.memory_size = memory_size
        self.memory_dim = memory_dim

        # Bounded FIFO memory bank: once full, appending evicts the oldest entry.
        self.memory_bank = deque(maxlen=memory_size)

        # Encodes a single-channel mask (B, 1, H, W) into a fixed-size vector.
        self.memory_encoder = nn.Sequential(
            nn.Conv2d(1, 64, 3, padding=1),  # mask input
            nn.ReLU(),
            nn.Conv2d(64, 128, 3, padding=1),
            nn.ReLU(),
            nn.AdaptiveAvgPool2d((16, 16)),
            nn.Flatten(),
            nn.Linear(128 * 16 * 16, memory_dim),
            nn.ReLU()
        )

        # Memory retrieval network.
        # NOTE(review): defined (and therefore trained/saved in state_dict)
        # but never called anywhere in this class — retrieval actually uses
        # cosine similarity in retrieve_memory(). Kept for checkpoint
        # compatibility; confirm before removing.
        self.memory_retrieval = nn.Sequential(
            nn.Linear(input_dim + memory_dim, prompt_dim),
            nn.ReLU(),
            nn.Linear(prompt_dim, memory_size),
            nn.Softmax(dim=-1)
        )

        # Memory fusion network.
        # NOTE(review): also unused in the visible code paths — see above.
        self.memory_fusion = nn.Sequential(
            nn.Linear(prompt_dim + memory_dim, prompt_dim),
            nn.ReLU(),
            nn.Linear(prompt_dim, prompt_dim)
        )

        # Memory-guided prompt heads: each consumes the per-location
        # concatenation of image features and the retrieved memory vector.
        self.memory_guided_generator = nn.ModuleDict({
            'points': nn.Sequential(
                nn.Linear(prompt_dim + memory_dim, prompt_dim // 2),
                nn.ReLU(),
                nn.Linear(prompt_dim // 2, 2)
            ),
            'boxes': nn.Sequential(
                nn.Linear(prompt_dim + memory_dim, prompt_dim // 2),
                nn.ReLU(),
                nn.Linear(prompt_dim // 2, 4)
            ),
            'masks': nn.Sequential(
                nn.Linear(prompt_dim + memory_dim, prompt_dim // 2),
                nn.ReLU(),
                nn.Linear(prompt_dim // 2, 32 * 32)
            )
        })

        # Memory update network.
        # NOTE(review): not invoked in the visible code paths either.
        self.memory_update = nn.Sequential(
            nn.Linear(prompt_dim + memory_dim, memory_dim),
            nn.ReLU(),
            nn.Linear(memory_dim, memory_dim)
        )

        # Projects pooled image features into memory space for cosine matching.
        self.similarity_net = nn.Sequential(
            nn.Linear(input_dim, memory_dim),
            nn.ReLU(),
            nn.Linear(memory_dim, memory_dim)
        )

        self.logger.info(f"记忆掩码提示生成器初始化完成: {self.get_model_info()}")

    def encode_memory(self, mask: torch.Tensor) -> torch.Tensor:
        """
        Encode a segmentation mask into a memory feature vector.

        Args:
            mask: Mask tensor (B, 1, H, W).

        Returns:
            Memory features (B, memory_dim).
        """
        return self.memory_encoder(mask)

    def add_to_memory(self,
                     image_features: torch.Tensor,
                     mask: torch.Tensor,
                     metadata: Optional[Dict[str, Any]] = None):
        """
        Add a (features, mask) pair to the memory bank.

        The entry is stored on CPU (detached via no_grad) and moved back to
        ``self.device`` at retrieval time.

        Args:
            image_features: Image features (B, C, H, W).
            mask: Segmentation mask (B, 1, H, W).
            metadata: Optional metadata stored alongside the entry.
        """
        with torch.no_grad():
            # Encode the mask into memory space.
            memory_feature = self.encode_memory(mask)

            # Pool image features to a vector and project into memory space.
            image_feature = self.similarity_net(
                F.adaptive_avg_pool2d(image_features, (1, 1)).flatten(1)
            )

            # Build the memory entry (CPU tensors to keep GPU memory free).
            memory_item = {
                'image_feature': image_feature.cpu(),
                'memory_feature': memory_feature.cpu(),
                'mask': mask.cpu(),
                'metadata': metadata or {}
            }

            # deque(maxlen=...) evicts the oldest entry automatically.
            self.memory_bank.append(memory_item)

    def retrieve_memory(self,
                       image_features: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Retrieve memories relevant to the current image.

        NOTE(review): stacking assumes every stored entry shares the query's
        batch size and that all stored masks share one spatial resolution —
        confirm this invariant against the callers.

        Args:
            image_features: Current image features (B, C, H, W).

        Returns:
            Tuple of:
              - retrieved memory features (B, memory_dim),
              - attention weights (B, K) where K = current bank size
                (K = memory_size only on the empty-bank zero path),
              - attention-weighted masks (B, 1, H, W).
        """
        if len(self.memory_bank) == 0:
            # Empty bank: return zero placeholders.
            batch_size = image_features.size(0)
            zero_memory = torch.zeros(batch_size, self.memory_dim, device=self.device)
            zero_weights = torch.zeros(batch_size, self.memory_size, device=self.device)
            zero_masks = torch.zeros(batch_size, 1, 32, 32, device=self.device)
            return zero_memory, zero_weights, zero_masks

        # Project the current image into memory space.
        current_feature = self.similarity_net(
            F.adaptive_avg_pool2d(image_features, (1, 1)).flatten(1)
        )  # (B, memory_dim)

        # Compare against every stored entry.
        similarities = []
        memory_features = []
        masks = []
        for memory_item in self.memory_bank:
            similarity = F.cosine_similarity(
                current_feature,
                memory_item['image_feature'].to(self.device),
                dim=1
            )  # (B,)
            similarities.append(similarity)
            memory_features.append(memory_item['memory_feature'].to(self.device))
            masks.append(memory_item['mask'].to(self.device))

        # K = number of entries currently stored; may be < self.memory_size.
        bank_size = len(self.memory_bank)
        similarities = torch.stack(similarities, dim=1)        # (B, K)
        memory_features = torch.stack(memory_features, dim=1)  # (B, K, memory_dim)
        masks = torch.stack(masks, dim=1)                      # (B, K, 1, H, W)

        # Softmax over the bank dimension.
        attention_weights = F.softmax(similarities, dim=1)  # (B, K)

        # Attention-weighted sum of memory features.
        retrieved_memory = torch.sum(
            memory_features * attention_weights.unsqueeze(-1),
            dim=1
        )  # (B, memory_dim)

        # Attention-weighted sum of stored masks.
        # BUG FIX: this view previously hard-coded self.memory_size, which
        # raised a RuntimeError whenever the bank was not yet full (K <
        # memory_size); use the actual bank size instead.
        retrieved_masks = torch.sum(
            masks * attention_weights.view(-1, bank_size, 1, 1, 1),
            dim=1
        )  # (B, 1, H, W)

        return retrieved_memory, attention_weights, retrieved_masks

    def generate_memory_guided_prompts(self,
                                     features: torch.Tensor,
                                     memory_features: torch.Tensor) -> Dict[str, torch.Tensor]:
        """
        Generate prompts conditioned on retrieved memory features.

        Args:
            features: Current image features (B, C, H, W).
            memory_features: Retrieved memory features (B, memory_dim).

        Returns:
            Dict with per-location 'points' (B, H*W, 2), 'boxes' (B, H*W, 4),
            'masks' (B, H, W, 32*32) plus 'memory_features' and
            'fused_features'.
        """
        batch_size, channels, height, width = features.shape

        # Flatten the spatial grid into a token sequence.
        features_flat = features.view(batch_size, channels, -1).transpose(1, 2)  # (B, H*W, C)

        # Broadcast the memory vector to every spatial location.
        memory_expanded = memory_features.unsqueeze(1).repeat(1, height * width, 1)  # (B, H*W, memory_dim)

        # Concatenate current features with memory.
        fused_features = torch.cat([features_flat, memory_expanded], dim=-1)  # (B, H*W, C+memory_dim)

        # Run each prompt head over the fused tokens.
        prompts = {}
        for prompt_type, generator in self.memory_guided_generator.items():
            prompt_logits = generator(fused_features)  # (B, H*W, output_dim)

            if prompt_type == 'masks':
                # Mask prompts: restore the 2D spatial layout.
                mask_logits = prompt_logits.view(batch_size, height, width, -1)
                prompts[prompt_type] = mask_logits
            else:
                prompts[prompt_type] = prompt_logits

        # Expose the memory information for downstream consumers.
        prompts['memory_features'] = memory_features
        prompts['fused_features'] = fused_features

        return prompts

    def forward(self,
                image_features: torch.Tensor,
                context: Optional[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
        """
        Forward pass.

        Args:
            image_features: Image features (B, C, H, W).
            context: Optional context; if it contains 'current_mask', that
                mask is added to the memory bank after prompt generation.

        Returns:
            Prompt dict (see generate_memory_guided_prompts) extended with
            'memory_attention_weights' and 'retrieved_masks'.
        """
        # Retrieve relevant memories for the current image.
        memory_features, attention_weights, retrieved_masks = self.retrieve_memory(image_features)

        # Generate memory-guided prompts.
        prompts = self.generate_memory_guided_prompts(image_features, memory_features)

        # Attach memory diagnostics.
        prompts['memory_attention_weights'] = attention_weights
        prompts['retrieved_masks'] = retrieved_masks

        # If a fresh mask is supplied, record it for future retrievals.
        if context and 'current_mask' in context:
            self.add_to_memory(image_features, context['current_mask'], context)

        return prompts

    def generate_prompts(self,
                        image: torch.Tensor,
                        metadata: Optional[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
        """
        Generate prompts from a raw image.

        Args:
            image: Input image (B, C, H, W).
            metadata: Optional metadata / history (passed through as forward
                context, so it may carry 'current_mask').

        Returns:
            Prompt dict from forward().
        """
        # Extract image features (provided by BasePromptGenerator).
        image_features = self.extract_features(image)

        # Generate memory-guided prompts.
        return self.forward(image_features, metadata)

    def compute_memory_loss(self,
                          predictions: Dict[str, torch.Tensor],
                          targets: Dict[str, torch.Tensor]) -> torch.Tensor:
        """
        Compute the memory-guided training loss.

        Args:
            predictions: Prediction dict (output of forward()).
            targets: Target dict; may contain 'masks'.

        Returns:
            Scalar loss tensor.
        """
        total_loss = 0.0

        # Base prompt loss (provided by BasePromptGenerator).
        total_loss += self.compute_loss(predictions, targets)

        # Memory-consistency loss: retrieved masks should resemble targets.
        if 'retrieved_masks' in predictions and 'masks' in targets:
            memory_consistency_loss = F.mse_loss(
                predictions['retrieved_masks'],
                targets['masks']
            )
            total_loss += 0.1 * memory_consistency_loss

        # Attention-entropy regularizer: discourage overly concentrated
        # attention over the memory bank.
        # BUG FIX: the previous version added the (positive) entropy to the
        # loss, which *minimizes* entropy and thus concentrates attention —
        # the opposite of the stated intent. Adding the negative entropy
        # (Σ w·log w) maximizes entropy instead.
        if 'memory_attention_weights' in predictions:
            attention_weights = predictions['memory_attention_weights']
            neg_entropy = torch.sum(
                attention_weights * torch.log(attention_weights + 1e-8),
                dim=1
            )
            total_loss += 0.01 * torch.mean(neg_entropy)

        return total_loss

    def clear_memory(self):
        """Clear the memory bank."""
        self.memory_bank.clear()
        self.logger.info("记忆库已清空")

    def get_memory_info(self) -> Dict[str, Any]:
        """Return current memory-bank statistics."""
        return {
            'memory_size': len(self.memory_bank),
            'max_memory_size': self.memory_size,
            'memory_dim': self.memory_dim
        }

    def save_memory(self, save_path: str):
        """Serialize the memory bank to ``save_path`` via torch.save."""
        memory_data = {
            'memory_bank': list(self.memory_bank),
            'memory_size': self.memory_size,
            'memory_dim': self.memory_dim
        }
        torch.save(memory_data, save_path)
        self.logger.info(f"记忆库已保存: {save_path}")

    def load_memory(self, load_path: str):
        """Load a memory bank previously written by save_memory.

        SECURITY: torch.load unpickles arbitrary Python objects (the entries
        carry metadata dicts, so weights_only cannot be used) — only load
        files from trusted sources.
        """
        memory_data = torch.load(load_path, map_location=self.device)
        self.memory_bank = deque(memory_data['memory_bank'], maxlen=self.memory_size)
        self.logger.info(f"记忆库已加载: {load_path}")