#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
多模态融合模型
将图像和文本特征融合，进行UCEIS评分分类
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Dict, Any

# Relative imports from sibling modules of this package
from .image_branch import ImageBranch
from .text_branch import TextBranch


class EndoMultimodalModel(nn.Module):
    """Multimodal UCEIS scoring model.

    Fuses image-branch and text-branch features by concatenation and maps
    them through a fusion MLP to a ``num_classes``-way classification head.

    Args:
        image_pretrained: Whether ``ImageBranch`` loads pretrained weights.
        text_pretrained: HuggingFace model name forwarded to ``TextBranch``.
        feature_dim: Output dimension of the text branch.
        num_classes: Number of output classes (8 for UCEIS).
        image_feature_dim: Channel count of the image-branch feature map
            (256 for the current ``ImageBranch`` — TODO confirm against that
            module). Exposed so ``feature_dim`` can be changed independently
            of the image branch.
    """

    def __init__(self,
                 image_pretrained: bool = True,
                 text_pretrained: str = 'bert-base-uncased',
                 feature_dim: int = 256,
                 num_classes: int = 8,
                 image_feature_dim: int = 256):
        super().__init__()

        # Branch encoders (defined in sibling modules).
        self.image_branch = ImageBranch(pretrained=image_pretrained)
        self.text_branch = TextBranch(pretrained_model=text_pretrained, output_dim=feature_dim)

        # Collapse the image feature map (B, C, H, W) down to a vector (B, C).
        self.image_pooling = nn.AdaptiveAvgPool2d((1, 1))

        # FIX: was ``feature_dim * 2``, which silently assumed the image
        # branch emits exactly ``feature_dim`` channels; any other
        # ``feature_dim`` crashed at the first fusion Linear. Sum the two
        # dimensions explicitly instead (256 + 256 = 512 with defaults).
        fusion_input_dim = image_feature_dim + feature_dim

        # TODO: Implement custom Multi-Layer Fusion Attention Mechanism here.
        # Plain concatenation is the current baseline fusion method.

        # Fusion MLP: batch-norm plus graded dropout to curb overfitting.
        # NOTE: BatchNorm1d requires batch size > 1 while training.
        self.fusion_layers = nn.Sequential(
            nn.Linear(fusion_input_dim, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Dropout(0.4),
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(inplace=True),
            nn.Dropout(0.3),
            nn.Linear(256, 128),
            nn.BatchNorm1d(128),
            nn.ReLU(inplace=True),
            nn.Dropout(0.2),
        )

        # Extra dropout right before the classifier, against overfitting.
        self.final_dropout = nn.Dropout(0.5)

        # Classification head.
        self.classifier = nn.Linear(128, num_classes)

        # NOTE(review): LayerNorm over the class logits is unusual — it
        # re-centers/rescales them before any loss is applied. Kept for
        # behavioral compatibility; confirm it is intentional.
        self.output_norm = nn.LayerNorm(num_classes)

    def forward(self, images: torch.Tensor, text_inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
        """Forward pass.

        Args:
            images: (B, 3, 224, 224) input image tensor.
            text_inputs: dict of BERT inputs:
                - input_ids: (B, max_len)
                - attention_mask: (B, max_len)

        Returns:
            logits: (B, num_classes) classification logits.
        """
        # Image branch: feature map -> pooled feature vector.
        image_features = self.image_branch(images)           # (B, C, H, W)
        image_features = self.image_pooling(image_features)  # (B, C, 1, 1)
        image_features = image_features.flatten(1)           # (B, C)

        # Text branch.
        text_features = self.text_branch(text_inputs)        # (B, feature_dim)

        # Baseline fusion: simple concatenation.
        # TODO: replace with an attention-based fusion strategy here.
        fused_features = torch.cat([image_features, text_features], dim=1)

        # Fusion MLP down to a 128-d representation.
        fused_features = self.fusion_layers(fused_features)  # (B, 128)

        # Final dropout against overfitting.
        fused_features = self.final_dropout(fused_features)

        # Classify, then normalize the logits (see NOTE in __init__).
        logits = self.classifier(fused_features)             # (B, num_classes)
        logits = self.output_norm(logits)

        return logits

    def get_features(self, images: torch.Tensor, text_inputs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """Return intermediate features for analysis and visualization.

        Args:
            images: input image tensor.
            text_inputs: dict of BERT inputs (see ``forward``).

        Returns:
            Dict with 'image_features', 'text_features' and 'fused_features'.

        NOTE(review): gradients are disabled here, but train/eval mode is
        left as-is — call ``model.eval()`` first if you need deterministic
        features (dropout/batch-norm stay active in training mode).
        """
        with torch.no_grad():
            # Image features: feature map -> pooled vector.
            image_features = self.image_branch(images)
            image_features_vec = self.image_pooling(image_features).flatten(1)

            # Text features.
            text_features = self.text_branch(text_inputs)

            # Fused representation after the fusion MLP.
            fused_features = torch.cat([image_features_vec, text_features], dim=1)
            fusion_features = self.fusion_layers(fused_features)

            return {
                'image_features': image_features_vec,
                'text_features': text_features,
                'fused_features': fusion_features
            }


def test_fusion_model():
    """Smoke-test the multimodal fusion model with random inputs.

    Builds the model, runs one forward pass and one feature-extraction
    pass, prints the tensor shapes, and returns them.

    Returns:
        Tuple of (logits shape, intermediate-features dict).
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Build the model (no pretrained image weights to keep the test light).
    model = EndoMultimodalModel(image_pretrained=False, text_pretrained='bert-base-uncased').to(device)

    # FIX: switch to eval mode. ``torch.no_grad`` alone does NOT disable
    # dropout or make BatchNorm use running statistics, so the original
    # "inference" test produced stochastic outputs.
    model.eval()

    batch_size = 4

    # Random image input.
    test_images = torch.randn(batch_size, 3, 224, 224).to(device)

    # Text input simulating tokenizer output; 30522 is the
    # bert-base-uncased vocabulary size.
    test_input_ids = torch.randint(0, 30522, (batch_size, 128)).to(device)
    test_attention_mask = torch.ones(batch_size, 128).to(device)
    test_text_inputs = {
        'input_ids': test_input_ids,
        'attention_mask': test_attention_mask
    }

    # Forward passes without gradient tracking.
    with torch.no_grad():
        logits = model(test_images, test_text_inputs)
        features = model.get_features(test_images, test_text_inputs)

        print(f"图像输入尺寸: {test_images.shape}")
        print(f"文本input_ids尺寸: {test_input_ids.shape}")
        print(f"文本attention_mask尺寸: {test_attention_mask.shape}")
        print(f"输出logits尺寸: {logits.shape}")
        print(f"图像特征尺寸: {features['image_features'].shape}")
        print(f"文本特征尺寸: {features['text_features'].shape}")
        print(f"融合特征尺寸: {features['fused_features'].shape}")
        print(f"模型总参数数量: {sum(p.numel() for p in model.parameters()):,}")

    return logits.shape, features


if __name__ == "__main__":
    test_fusion_model()