#!/usr/bin/env python3
"""
自动点提示生成模块

第一帧自动生成点提示
对于EchoNet等坐标标注数据集，训练目标是预测坐标点
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from typing import Dict, List, Tuple, Optional, Any

import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent.parent))
from .base_prompt import BasePromptGenerator
from .input_prompt import InputPromptHandler
from utils.logger import get_logger


class AutoPointPromptGenerator(BasePromptGenerator):
    """Automatic point-prompt generator.

    Automatically generates point prompts for the first frame. Used for
    training on coordinate-annotated datasets such as EchoNet, where the
    training target is to predict coordinate points.
    """

    def __init__(self,
                 input_dim: int = 256,
                 prompt_dim: int = 256,
                 num_points: int = 2,  # EchoNet typically annotates 2 points
                 device: str = "cuda"):
        """
        Initialize the automatic point-prompt generator.

        Args:
            input_dim: Input feature dimension expected by the base class.
            prompt_dim: Prompt feature dimension.
            num_points: Number of points to generate per image.
            device: Device for dynamically created layers.
        """
        super().__init__(input_dim, prompt_dim, device)

        self.num_points = num_points
        self.input_handler = InputPromptHandler(device)

        # Point-coordinate regressor: pooled global feature -> num_points * (x, y).
        self.point_generator = nn.Sequential(
            nn.Linear(prompt_dim, prompt_dim // 2),
            nn.ReLU(),
            nn.Linear(prompt_dim // 2, prompt_dim // 4),
            nn.ReLU(),
            nn.Linear(prompt_dim // 4, num_points * 2)  # num_points * (x, y)
        )

        # Point-label head: per-point foreground probability.
        self.label_generator = nn.Sequential(
            nn.Linear(prompt_dim, prompt_dim // 2),
            nn.ReLU(),
            nn.Linear(prompt_dim // 2, num_points),
            nn.Sigmoid()  # probability; > 0.5 is treated as foreground
        )

        # Confidence head for the generated prompt as a whole.
        self.confidence_estimator = nn.Sequential(
            nn.Linear(prompt_dim, prompt_dim // 2),
            nn.ReLU(),
            nn.Linear(prompt_dim // 2, 1),
            nn.Sigmoid()
        )

        self.logger.info(f"自动点提示生成器初始化完成: num_points={num_points}")

    def compute_point_loss(self,
                          predicted_points: torch.Tensor,
                          target_points: torch.Tensor,
                          reduction: str = 'mean') -> torch.Tensor:
        """
        Compute the loss between predicted points and ground-truth points.

        Args:
            predicted_points: Predicted coordinates, (B, num_points, 2)
                or flattened (B, num_points * 2).
            target_points: Ground-truth coordinates, same accepted shapes.
            reduction: Reduction mode ('mean', 'sum', 'none').

        Returns:
            Scalar loss for 'mean'/'sum', or per-point Euclidean
            distances of shape (B, num_points) for 'none'.
        """
        # Normalize both inputs to (B, num_points, 2).
        if predicted_points.dim() == 2:
            predicted_points = predicted_points.view(-1, self.num_points, 2)
        if target_points.dim() == 2:
            target_points = target_points.view(-1, self.num_points, 2)

        # Per-coordinate squared error, kept unreduced: (B, num_points, 2).
        squared_error = F.mse_loss(
            predicted_points,
            target_points,
            reduction='none'
        )

        # Euclidean distance per point: (B, num_points).
        # clamp_min avoids the infinite gradient of sqrt at exactly zero,
        # which would produce NaNs in backprop whenever a predicted point
        # coincides with its target.
        point_distances = torch.sqrt(
            torch.sum(squared_error, dim=-1).clamp_min(1e-12)
        )

        if reduction == 'mean':
            return torch.mean(point_distances)
        elif reduction == 'sum':
            return torch.sum(point_distances)
        else:
            return point_distances

    def forward(self,
                image_features: torch.Tensor,
                context: Optional[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
        """
        Forward pass: predict point prompts from image features.

        Args:
            image_features: Image features (B, C, H, W).
            context: Optional context information (currently unused here).

        Returns:
            Dict with 'point_coords' (B, num_points, 2) in [0, 1] relative
            coordinates, 'point_labels' (B, num_points) hard 0/1 labels,
            'confidence' (B, 1), and 'label_logits' (B, num_points)
            sigmoid probabilities.
        """
        batch_size = image_features.shape[0]

        # Global average pooling to a per-image feature vector.
        global_features = F.adaptive_avg_pool2d(image_features, (1, 1))
        global_features = global_features.view(batch_size, -1)  # (B, actual_feature_dim)

        # Handle feature-dimension mismatch with a lazily created projection.
        # NOTE(review): layers created inside forward are registered as
        # submodules, but an optimizer constructed BEFORE the first forward
        # pass will not include these parameters — confirm training setup.
        actual_feature_dim = global_features.shape[1]
        if actual_feature_dim != self.input_dim:
            # (Re)create the projection only when missing or shape-stale.
            if not hasattr(self, 'dynamic_projection') or self.dynamic_projection.in_features != actual_feature_dim:
                # Plain attribute assignment already registers the submodule
                # via nn.Module.__setattr__; no add_module call needed.
                self.dynamic_projection = nn.Linear(actual_feature_dim, self.input_dim).to(self.device)
            global_features = self.dynamic_projection(global_features)  # (B, input_dim)

        global_features = self.input_projection(global_features)  # (B, prompt_dim)

        # Predict point coordinates.
        point_coords = self.point_generator(global_features)  # (B, num_points * 2)
        point_coords = point_coords.view(batch_size, self.num_points, 2)  # (B, num_points, 2)

        # Squash into [0, 1] (relative image coordinates).
        point_coords = torch.sigmoid(point_coords)

        # Predict point labels; hard-threshold the sigmoid probabilities.
        label_logits = self.label_generator(global_features)  # (B, num_points)
        point_labels = (label_logits > 0.5).long()  # (B, num_points)

        # Overall confidence for the generated prompt.
        confidence = self.confidence_estimator(global_features)  # (B, 1)

        return {
            'point_coords': point_coords,
            'point_labels': point_labels,
            'confidence': confidence,
            'label_logits': label_logits
        }

    def generate_prompts(self,
                        image: torch.Tensor,
                        metadata: Optional[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
        """
        Generate point prompts in SAM format.

        Args:
            image: Input image (B, C, H, W).
            metadata: Optional metadata (may carry target coordinates
                during training; passed through to forward as context).

        Returns:
            Dict with 'points' (B, num_points, 2) in absolute pixel
            coordinates, 'point_labels' (B, num_points), and
            'confidence' (B, 1).
        """
        # Extract image features (provided by the base class).
        image_features = self.extract_features(image)

        # Predict relative point prompts.
        prompts = self.forward(image_features, metadata)

        batch_size = prompts['point_coords'].shape[0]
        H, W = image.shape[2], image.shape[3]

        # Convert relative [0, 1] coordinates to absolute (x, y) pixels.
        point_coords_abs = prompts['point_coords'] * torch.tensor([W, H], device=self.device)
        point_labels = prompts['point_labels']

        # Ensure SAM-compatible shapes: points as (B, N, 2), labels as (B, N).
        point_coords_flat = point_coords_abs.view(batch_size, -1, 2)
        point_labels_flat = point_labels.view(batch_size, -1)

        return {
            'points': point_coords_flat,  # (B, num_points, 2)
            'point_labels': point_labels_flat,  # (B, num_points)
            'confidence': prompts['confidence']  # (B, 1)
        }

    def compute_coordinate_loss(self,
                               predictions: Dict[str, torch.Tensor],
                               targets: Dict[str, torch.Tensor]) -> torch.Tensor:
        """
        Compute coordinate-regression loss (for coordinate-annotated
        datasets such as EchoNet).

        Args:
            predictions: {'point_coords': (B, num_points, 2)}.
            targets: {'point_coords': (B, num_points, 2)}.

        Returns:
            Combined L1 + 0.5 * L2 loss; zero tensor if either side is
            missing 'point_coords'.
        """
        if 'point_coords' not in predictions or 'point_coords' not in targets:
            return torch.tensor(0.0, device=self.device)

        pred_coords = predictions['point_coords']
        target_coords = targets['point_coords']

        # L1 loss (robust coordinate regression).
        coord_loss = F.l1_loss(pred_coords, target_coords)

        # L2 loss term for smoother gradients near the optimum.
        coord_loss_l2 = F.mse_loss(pred_coords, target_coords)

        # Weighted combination.
        total_loss = coord_loss + 0.5 * coord_loss_l2

        return total_loss
