import math

import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import resnet34
from transformers import BertModel, BertConfig

class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (used by the vision branch's Transformer).

    The encoding buffer is registered batch-first with shape
    ``[1, max_len, d_model]``, so ``forward`` expects a batch-first input
    ``[B, T, d_model]`` with ``T <= max_len``.
    """

    def __init__(self, d_model, max_len=50):
        """
        Args:
            d_model: feature dimension of the inputs (must be even for the
                sin/cos interleaving below to fill every column).
            max_len: maximum supported sequence length.
        """
        super(PositionalEncoding, self).__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Geometric frequency schedule from "Attention Is All You Need".
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)

        # Buffer (not a parameter): moves with .to(device), excluded from grads.
        self.register_buffer('pe', pe.unsqueeze(0))  # [1, max_len, d_model]

    def forward(self, x):
        """Add positional encodings to ``x`` ([B, T, d_model]) and return it.

        BUG FIX: the original sliced with ``s.size(1)`` where ``s`` was an
        undefined name (NameError); the intended tensor is ``x``.
        """
        return x + self.pe[:, :x.size(1)]

class VisionModel(nn.Module):
    """Vision branch: ResNet-34 backbone + Transformer encoder.

    Produces a sequence of visual features along the image width, one
    512-dim feature per horizontal position: output shape ``[W, B, 512]``.
    """

    def __init__(self, num_classes, hidden_size=512):
        """
        Args:
            num_classes: size of the output vocabulary for ``self.fc``.
            hidden_size: Transformer model dimension; must match the CNN's
                channel count (ResNet-34's final stage outputs 512 channels).
        """
        super(VisionModel, self).__init__()
        # CNN feature extractor: drop ResNet's avgpool + fc -> [B, 512, H, W]
        self.cnn = nn.Sequential(*list(resnet34(pretrained=True).children())[:-2])
        # Collapse the height axis -> [B, 512, 1, W]
        self.adaptive_pool = nn.AdaptiveAvgPool2d((1, None))

        # Transformer encoder over the width-wise feature sequence.
        self.pos_encoder = PositionalEncoding(hidden_size)
        encoder_layer = nn.TransformerEncoderLayer(d_model=hidden_size, nhead=8)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=3)

        # Classification head mapping features to per-class logits.
        # NOTE(review): not applied inside forward() — downstream (ABINet)
        # consumes the 512-dim features directly; confirm where logits are taken.
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """Encode image batch ``x`` ([B, C, H, W]) into features [W, B, 512].

        BUG FIXES vs. original:
        - The positional encoding buffer is batch-first ([1, max_len, d]);
          the original applied it to a seq-first [W, B, 512] tensor, which
          broadcast positions across the batch axis. We now add positions
          while batch-first, then permute for the Transformer.
        - The original forward had no return statement and returned None.
        """
        feats = self.cnn(x)                        # [B, 512, H, W]
        feats = self.adaptive_pool(feats)          # [B, 512, 1, W]
        feats = feats.squeeze(2).permute(0, 2, 1)  # [B, W, 512] (batch-first)

        feats = self.pos_encoder(feats)            # positions run along W
        feats = feats.permute(1, 0, 2)             # [W, B, 512] (seq-first)
        feats = self.transformer_encoder(feats)    # [W, B, 512]
        return feats

class LanguageModel(nn.Module):
    """Language branch: a small BERT that refines feature sequences.

    Consumes and emits sequence-first tensors ``[W, B, C]``; internally BERT
    operates batch-first, so the input is transposed on the way in and out.
    """

    def __init__(self, num_classes, hidden_size=512):
        """
        Args:
            num_classes: vocabulary size for the BERT config.
            hidden_size: feature dimension; must match the incoming embeddings.
        """
        super(LanguageModel, self).__init__()
        config = BertConfig(
            hidden_size=hidden_size,
            num_hidden_layers=4,
            num_attention_heads=8,
            vocab_size=num_classes
        )
        self.bert = BertModel(config)

    def forward(self, x):
        """Refine features ``x`` ([W, B, C]) and return the same shape."""
        batch_first = x.transpose(0, 1)                       # [B, W, C]
        hidden = self.bert(inputs_embeds=batch_first).last_hidden_state
        return hidden.transpose(0, 1)                         # [W, B, C]

# ABINet-style model: a vision prediction iteratively refined by a language model.
class ABINet(nn.Module):
    def __init__(self, num_classes, iter_steps=3):
        """
        Args:
            num_classes: size of the character vocabulary.
            iter_steps: number of language-model refinement iterations.
        """
        super(ABINet, self).__init__()
        self.vision_model = VisionModel(num_classes)
        self.language_model = LanguageModel(num_classes)
        self.iter_steps = iter_steps        # number of refinement iterations

        # Fuses concatenated vision (512) + language (512) features back to 512.
        self.fusion = nn.Linear(1024, 512)

    def forward(self, x):
        """Run the vision branch once, then iteratively refine its output.

        Returns the stack of the initial output plus one refined output per
        iteration: ``[W, iter_steps+1, B, 512]``.
        """
        # Initial visual features: [W, B, 512]
        v_feat = self.vision_model(x)
        v_output = v_feat
        predictions = [v_output]

        # Iterative refinement
        for _ in range(self.iter_steps):
            # Language-model correction of the current estimate: [W, B, 512]
            l_output = self.language_model(v_output)

            # Feature fusion.
            # BUG FIX: original wrote `dim-1` (subtraction of an undefined
            # name) instead of the keyword argument `dim=-1`.
            fused = torch.cat([v_output, l_output], dim=-1)   # [W, B, 1024]
            fused = self.fusion(fused)                        # [W, B, 512]

            # Residual update against the cached vision features.
            # BUG FIX: original re-ran only self.vision_model.cnn(x), yielding
            # a 4-D [B, 512, H, W] tensor that cannot be added to [W, B, 512];
            # reuse the full vision features computed above instead.
            v_output = v_feat + fused
            predictions.append(v_output)

        # Stacking seq-first tensors along dim=1 gives [W, iter_steps+1, B, 512].
        # NOTE(review): the original comment claimed [B, steps+1, W, C] —
        # confirm the layout consumers expect before relying on it.
        return torch.stack(predictions, dim=1)