# File: fedentgate/core/attention_aggregation.py
"""Multi-source Attention Aggregation Module"""

import torch
import torch.nn as nn

class CoAttentionAggregator(nn.Module):
    """Multi-source attention aggregation over federated clients.

    Builds a per-client input vector from entropy, gating state, and a noise
    feature (Eq.8), projects the vectors with a learned linear layer, and
    produces normalized attention weights across clients (Eq.3) using an
    adaptive, variance-driven temperature (Eq.9).
    """

    # Strictly-positive floor for the adaptive temperature. Prevents division
    # by zero (variance == 0 with bias == 0) and a sign flip of the scores
    # when `scale_factor * variance + bias` goes non-positive.
    _MIN_TEMPERATURE = 1e-4

    def __init__(self, input_dim, temperature_init=0.07):
        """
        Initialize attention-based aggregation module
        Args:
            input_dim: Dimension of input feature vector
            temperature_init: Initial temperature parameter (τ)
        """
        super().__init__()
        # NOTE(review): `temperature` is learnable but never read by the
        # forward pass (Eq.9's adaptive temperature is used instead). Kept
        # registered so the interface and checkpoints stay compatible.
        self.temperature = nn.Parameter(torch.tensor(temperature_init))
        self.scale_factor = nn.Parameter(torch.tensor(1.0))
        self.bias = nn.Parameter(torch.tensor(0.0))
        self.feature_proj = nn.Linear(input_dim, input_dim)

    def build_input_vector(self, entropy, gating_state, noise_feature):
        """
        Construct multi-source input vector (Eq.8)
        Args:
            entropy: Client entropy (H_k), 0-dim tensor
            gating_state: Binary gating state, 0-dim tensor (cast to float)
            noise_feature: Noise attenuation factor, 0-dim tensor
        Returns:
            input_vec: Combined 3-element feature vector
        """
        return torch.cat([
            entropy.unsqueeze(0),
            gating_state.float().unsqueeze(0),
            noise_feature.unsqueeze(0),
        ], dim=0)

    def adaptive_temperature(self, input_norms):
        """
        Compute adaptive temperature parameter (Eq.9)
        Args:
            input_norms: L2 norms of all clients' input vectors, shape (K,)
        Returns:
            temp: Adapted temperature, clamped strictly positive
        """
        # Population variance (unbiased=False): the default unbiased
        # estimator divides by K-1 and returns NaN when only one client
        # participates in the round.
        variance = torch.var(input_norms, unbiased=False)
        temp = self.scale_factor * variance + self.bias
        # Clamp so the score division below is always well defined.
        return torch.clamp(temp, min=self._MIN_TEMPERATURE)

    def forward(self, client_features):
        """
        Compute attention weights for aggregation
        Args:
            client_features: List of K input vectors (each shape (input_dim,))
        Returns:
            attn_weights: (K, K) weights; each column sums to 1
        """
        # Project features into a shared space: (K, input_dim)
        projected = torch.stack([self.feature_proj(f) for f in client_features])

        # Adaptive temperature from the spread of projected-feature norms
        norms = torch.norm(projected, dim=1)
        temp = self.adaptive_temperature(norms)

        # Attention scores (Eq.3). Softmax exponentiates internally, so the
        # raw scaled similarities are passed directly; the original
        # exp-then-softmax double exponentiation overflows for moderate
        # score magnitudes and skews the resulting distribution.
        scores = projected @ projected.T / temp
        return torch.softmax(scores, dim=0)
