# Structure encoder (Transformer)

import torch
import torch.nn as nn
from torch.nn import TransformerEncoder, TransformerEncoderLayer

class StructEncoder(nn.Module):
    """Dual-modality encoder: a Transformer over path-token sequences plus a
    small CNN over images, producing per-timestep features of equal width.

    forward() returns a dict:
        'seq': [B, L, d_model//2] sequence features
        'img': [B, L, d_model//2] image features broadcast along the sequence
    """

    def __init__(self, vocab_size=100, d_model=256, dropout=0.2, n_layers=4):
        super().__init__()
        half = d_model // 2  # shared width of both modality outputs

        # Path-sequence encoder.
        # NOTE(review): the original defined token_embedding / pos_encoder /
        # transformer twice; the first set was silently overwritten (dead
        # parameters) and the surviving transformer ignored the `dropout`
        # and `n_layers` arguments. Kept one definition that honors them.
        self.token_embedding = nn.Embedding(vocab_size, half)
        # Learned positional encoding; assumes sequence length <= 500.
        self.pos_encoder = nn.Parameter(torch.randn(1, 500, half))
        self.transformer = TransformerEncoder(
            TransformerEncoderLayer(
                d_model=half,
                nhead=8,  # requires d_model//2 to be divisible by 8
                dim_feedforward=512,
                dropout=dropout,
                # forward() feeds [B, L, E] tensors with a [B, L] padding
                # mask; without batch_first=True the layer would read them
                # as [L, B, E] and break for B != L.
                batch_first=True,
            ),
            num_layers=n_layers,
        )

        # Output normalization for the sequence features. Was LayerNorm(d_model)
        # and never applied — fixed to the encoder width and used in forward().
        self.norm = nn.LayerNorm(half)

        # Image encoder. Final channel count follows d_model//2 (was a
        # hard-coded 128, correct only for the default d_model=256) so the
        # two modality outputs always have matching widths.
        self.cnn = nn.Sequential(
            nn.Conv2d(3, 64, 3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(64, half, 3, padding=1),
            nn.BatchNorm2d(half),
            nn.ReLU(),
            nn.AdaptiveAvgPool2d(1),
        )

    def forward(self, batch):
        """Encode one batch.

        Args:
            batch: dict with
                'tokens': LongTensor [B, L], padded with token id 0
                          (assumed collate_fn convention — TODO confirm),
                'image':  FloatTensor [B, 3, H, W] or a single [3, H, W].
                ('seq_len' from collate_fn is no longer read — the padding
                 mask is derived from the tokens directly.)

        Returns:
            dict with 'seq' [B, L, d_model//2] and 'img' [B, L, d_model//2].
        """
        tokens = batch['tokens']

        # Token + positional embeddings, truncated to the actual length.
        seq_emb = self.token_embedding(tokens) + self.pos_encoder[:, :tokens.size(1)]

        # Key-padding mask: True at padded positions (pad id 0).
        padding_mask = (tokens == 0)
        seq_feat = self.transformer(seq_emb, src_key_padding_mask=padding_mask)
        seq_feat = self.norm(seq_feat)

        # Image features: [B, half, 1, 1] -> [B, half].
        img = batch['image']
        if img.dim() == 3:  # single image [C, H, W]
            img = img.unsqueeze(0)  # add batch dim -> [1, C, H, W]
        img_feat = self.cnn(img).squeeze(-1).squeeze(-1)

        # Dual-modality output; image features repeated along the sequence.
        return {
            'seq': seq_feat,  # [B, L, d_model//2]
            'img': img_feat.unsqueeze(1).expand(-1, seq_feat.size(1), -1),  # [B, L, d_model//2]
        }