#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
文本分支模型
基于BERT预训练模型，输出256维文本特征
"""

import torch
import torch.nn as nn
from transformers import BertModel, BertConfig
from typing import Dict, Any


class TextBranch(nn.Module):
    """Text branch model.

    Encodes clinical text with a pretrained BERT backbone and projects the
    [CLS] token representation down to a fixed-size feature vector
    (256-dim by default).
    """

    def __init__(self, pretrained_model: str = 'bert-base-uncased', output_dim: int = 256):
        super().__init__()

        # Pretrained BERT backbone.
        self.bert = BertModel.from_pretrained(pretrained_model)

        # Backbone hidden size (768 for bert-base variants).
        hidden = self.bert.config.hidden_size

        # Projection head: hidden -> 512 -> output_dim, normalized.
        self.feature_projection = nn.Sequential(
            nn.Linear(hidden, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(0.2),
            nn.Linear(512, output_dim),
            nn.LayerNorm(output_dim),
        )

        # Final normalization of the projected features.
        # NOTE(review): feature_projection already ends with a LayerNorm, so
        # forward() applies LayerNorm twice back-to-back — likely redundant;
        # kept as-is to preserve the parameter set / checkpoint layout.
        self.output_norm = nn.LayerNorm(output_dim)

    def forward(self, text_inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
        """Encode a batch of tokenized texts.

        Args:
            text_inputs: dict of BERT inputs:
                - 'input_ids': (B, max_len) token ids
                - 'attention_mask': (B, max_len) attention mask

        Returns:
            (B, output_dim) text feature tensor.
        """
        encoded = self.bert(
            input_ids=text_inputs['input_ids'],
            attention_mask=text_inputs['attention_mask'],
        )

        # Use the [CLS] token embedding as the sentence-level representation.
        cls_vec = encoded.last_hidden_state[:, 0, :]  # (B, hidden)

        # Project down to output_dim, then apply the output normalization.
        projected = self.feature_projection(cls_vec)  # (B, output_dim)
        return self.output_norm(projected)


def test_text_branch():
    """Smoke-test the text branch with randomly generated token ids."""
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model = TextBranch().to(device)

    batch_size, max_length = 4, 128

    # Fake tokenizer output: random ids within BERT's ~30k vocabulary,
    # with every position attended to.
    test_input_ids = torch.randint(0, 30522, (batch_size, max_length)).to(device)
    test_attention_mask = torch.ones(batch_size, max_length).to(device)

    # Run inference without building a computation graph.
    with torch.no_grad():
        output = model({
            'input_ids': test_input_ids,
            'attention_mask': test_attention_mask,
        })
        print(f"输入input_ids尺寸: {test_input_ids.shape}")
        print(f"输入attention_mask尺寸: {test_attention_mask.shape}")
        print(f"输出文本特征尺寸: {output.shape}")
        print(f"模型参数数量: {sum(p.numel() for p in model.parameters()):,}")

    return output.shape


# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    test_text_branch()