
"""
Cross-Modal Attention Gate (Paper Section 2.4)
Implements Equations 10-12: Bidirectional attention mechanisms
"""

import torch
import torch.nn as nn

class BidirectionalAttentionGate(nn.Module):
    """Cross-modal attention gate (paper Section 2.4, Equations 10-12).

    Runs scaled dot-product attention in both directions between a behavior
    stream and a text stream, then fuses the two attended representations
    with a learned sigmoid gate.

    Args:
        latent_dim: Feature dimension shared by both modalities.
    """

    def __init__(self, latent_dim):
        super().__init__()
        # Behavior→Text projections (Equation 10): query from behavior,
        # packed key/value from text (split with chunk in forward).
        self.b2t_q = nn.Linear(latent_dim, latent_dim)
        self.b2t_kv = nn.Linear(latent_dim, latent_dim * 2)

        # Text→Behavior projections (Equation 11): query from text,
        # packed key/value from behavior.
        self.t2b_q = nn.Linear(latent_dim, latent_dim)
        self.t2b_kv = nn.Linear(latent_dim, latent_dim * 2)

        # Adaptive gating (Equation 12): per-feature sigmoid gate over the
        # concatenation of both attended streams.
        self.gate = nn.Sequential(
            nn.Linear(2 * latent_dim, latent_dim),
            nn.Sigmoid(),
        )

    def forward(self, behavior_feats, text_feats):
        """Fuse the two modalities via bidirectional attention + gating.

        Args:
            behavior_feats: (..., seq, latent_dim) behavior features.
            text_feats: (..., seq, latent_dim) text features. Both inputs
                must share the same sequence length so the attended streams
                can be concatenated for the gate.

        Returns:
            Gated fusion of the same shape as the inputs,
            ``g * attended_text + (1 - g) * attended_behavior``.
        """
        # BUGFIX: the original called `F.softmax` without importing
        # torch.nn.functional (NameError) and `torch.sqrt` on an integer
        # tensor (RuntimeError for Long dtype). We use torch.softmax and a
        # plain float scale instead. transpose(-2, -1) replaces `.T` so
        # batched (B, S, D) inputs also work.
        scale = self.b2t_q.out_features ** 0.5

        # Behavior-to-Text attention flow (Equation 10).
        q_behavior = self.b2t_q(behavior_feats)
        k_text, v_text = self.b2t_kv(text_feats).chunk(2, dim=-1)
        b2t_attn = torch.softmax(
            q_behavior @ k_text.transpose(-2, -1) / scale, dim=-1
        )
        attended_text = b2t_attn @ v_text

        # Text-to-Behavior attention flow (Equation 11).
        q_text = self.t2b_q(text_feats)
        k_behavior, v_behavior = self.t2b_kv(behavior_feats).chunk(2, dim=-1)
        t2b_attn = torch.softmax(
            q_text @ k_behavior.transpose(-2, -1) / scale, dim=-1
        )
        attended_behavior = t2b_attn @ v_behavior

        # Adaptive gating (Equation 12): convex combination of the two
        # attended streams, with the mixing weight learned per feature.
        gate = self.gate(torch.cat([attended_text, attended_behavior], dim=-1))
        return gate * attended_text + (1 - gate) * attended_behavior
