#!/usr/bin/env python3
"""
与训练权重兼容的推理模型
"""

import torch
import torch.nn as nn
from typing import Dict

# 导入原始组件
import sys
from pathlib import Path
MODEL_DIR = Path(r"D:\VSCodeProjects\UC_System\uc_model")
sys.path.insert(0, str(MODEL_DIR))

from models.image_branch import ImageBranch
from models.text_branch import TextBranch


class EndoMultimodalModelCompat(nn.Module):
    """Multimodal (image + text) classifier laid out to match the training-time
    checkpoint.

    Attribute names and the module ordering inside ``fusion_layers`` are kept
    identical to the training code so the saved state dict loads without any
    key remapping.
    """

    def __init__(self,
                 image_pretrained: bool = True,
                 text_pretrained: str = 'bert-base-uncased',
                 feature_dim: int = 256,
                 num_classes: int = 8):
        super().__init__()

        # Backbone branches (project-local modules).
        self.image_branch = ImageBranch(pretrained=image_pretrained)
        self.text_branch = TextBranch(pretrained_model=text_pretrained, output_dim=feature_dim)

        # Collapse the spatial dims of the image feature map to one vector.
        self.image_pooling = nn.AdaptiveAvgPool2d((1, 1))

        # Image vector concatenated with text vector; assumes the pooled image
        # vector is feature_dim wide (256 + 256 = 512) — matches training.
        fused_width = feature_dim * 2

        # Fusion MLP. The Linear layers must sit at indices 0, 3 and 6 of the
        # Sequential (no BatchNorm) so checkpoint keys line up.
        stack = [
            nn.Linear(fused_width, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(0.3),
            nn.Linear(512, 256),
            nn.ReLU(inplace=True),
            nn.Dropout(0.3),
            nn.Linear(256, 128),
            nn.ReLU(inplace=True),
            nn.Dropout(0.3),
        ]
        self.fusion_layers = nn.Sequential(*stack)

        # Extra regularization applied just before classification.
        self.final_dropout = nn.Dropout(0.5)

        # Projection to class logits.
        self.classifier = nn.Linear(128, num_classes)

        # LayerNorm over the logits, mirroring the training graph.
        self.output_norm = nn.LayerNorm(num_classes)

    def forward(self, images: torch.Tensor, text_inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
        """Run both branches, fuse the features, and return normalized logits.

        Args:
            images: image batch fed to the image backbone.
            text_inputs: tokenized text tensors consumed by the text branch.

        Returns:
            Logits of shape (batch, num_classes) after LayerNorm.
        """
        # Image path: backbone -> global average pool -> flat vector.
        img_vec = self.image_pooling(self.image_branch(images)).flatten(1)

        # Text path already yields a flat feature vector.
        txt_vec = self.text_branch(text_inputs)

        # Concatenate modalities, run the fusion MLP, then classify.
        fused = self.fusion_layers(torch.cat([img_vec, txt_vec], dim=1))
        fused = self.final_dropout(fused)

        return self.output_norm(self.classifier(fused))


# Smoke test: build the model and load the trained checkpoint weights.
if __name__ == "__main__":
    print("创建兼容模型...")
    model = EndoMultimodalModelCompat(
        image_pretrained=True,
        text_pretrained='bert-base-uncased',
        feature_dim=256,
        num_classes=8
    )

    # Checkpoint produced by the training run.
    model_path = Path(r"D:\VSCodeProjects\UC_System\uc_model\outputs\standard_training\best_model.pth")
    # NOTE(review): torch.load unpickles arbitrary objects — unsafe on
    # untrusted files; consider weights_only=True if the checkpoint allows it.
    checkpoint = torch.load(model_path, map_location='cpu')

    try:
        # Load exactly once. With strict=False, load_state_dict returns a
        # (missing_keys, unexpected_keys) result instead of raising on
        # mismatches. (The original called load_state_dict twice, copying
        # every weight into the model a second time just to get these keys.)
        missing_keys, unexpected_keys = model.load_state_dict(
            checkpoint['model_state_dict'], strict=False
        )
        print("✅ 模型权重加载成功！")

        # Report any mismatch between checkpoint keys and model parameters.
        if missing_keys:
            print(f"缺失的键: {missing_keys}")
        if unexpected_keys:
            print(f"多余的键: {unexpected_keys}")

    except Exception as e:
        print(f"❌ 加载失败: {e}")