# core/models/multimodal_fusion.py
import torch
from torch import nn


class MultiModalFusion(nn.Module):
    """Fuse text and geometric features via cross-modal attention.

    Pipeline: encode text with a Transformer encoder, project geometric
    features to the shared 512-dim space, cross-attend text -> geometry,
    then run a Transformer decoder over the fused sequence.

    All sequence tensors follow the PyTorch default layout
    ``(seq_len, batch, embed_dim)`` since ``batch_first`` is not set.
    """

    def __init__(self):
        super().__init__()
        # Text encoder: 3-layer Transformer over 512-dim token embeddings.
        self.text_encoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(d_model=512, nhead=8),
            num_layers=3,
        )

        # Geometric feature encoder: project 1024-dim features into the
        # shared 512-dim space used by the attention/decoder stack.
        self.geom_encoder = nn.Sequential(
            nn.Linear(1024, 512),
            nn.ReLU(),
            nn.LayerNorm(512),
        )

        # Cross-modal fusion: text queries attend over geometry keys/values.
        self.fusion_layer = nn.MultiheadAttention(embed_dim=512, num_heads=8)

        # Decoder: refines the fused sequence while attending to a memory
        # sequence (see forward()).
        self.decoder = nn.TransformerDecoder(
            nn.TransformerDecoderLayer(d_model=512, nhead=8),
            num_layers=3,
        )

    def forward(self, text_features, geom_features):
        """Run the fusion pipeline.

        Args:
            text_features: ``(text_len, batch, 512)`` text embeddings.
            geom_features: ``(geom_len, batch, 1024)`` geometric features.

        Returns:
            ``(text_len, batch, 512)`` fused, decoded representation.
        """
        # Encoding stage.
        text_encoded = self.text_encoder(text_features)
        geom_encoded = self.geom_encoder(geom_features)

        # Cross-modal attention: query = text, key/value = geometry.
        fused_features, _ = self.fusion_layer(
            text_encoded, geom_encoded, geom_encoded
        )

        # Decoding stage.
        # BUG FIX: nn.TransformerDecoder.forward requires BOTH `tgt` and
        # `memory`; the original single-argument call raised TypeError on
        # every forward pass. We use the geometry encoding as memory so the
        # decoder keeps attending to the geometric modality.
        # NOTE(review): `text_encoded` would also be a plausible memory —
        # confirm intended design with the original author.
        return self.decoder(fused_features, geom_encoded)