#!/usr/bin/env python3

"""
Attention-enhanced memory module for Neural-SLAM
Replaces traditional LSTM with modern attention mechanisms while maintaining compatibility
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import math

class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product self-attention.

    Projects the input into per-head query/key/value spaces, attends over the
    sequence dimension, and mixes the heads back through a final output
    projection.
    """

    def __init__(self, embed_dim, num_heads, dropout=0.1):
        super().__init__()
        assert embed_dim % num_heads == 0

        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads
        # Standard 1/sqrt(d_k) scaling keeps pre-softmax logits well-behaved.
        self.scale = math.sqrt(self.head_dim)

        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.out_proj = nn.Linear(embed_dim, embed_dim)

        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask=None):
        """Attend x to itself.

        Args:
            x: [batch_size, seq_len, embed_dim]
            mask: optional [batch_size, seq_len]; positions equal to 0 are
                excluded from attention.

        Returns:
            Tuple of (output, attn_weights): output is [B, L, D], and
            attn_weights is [B, H, L, L] (taken after attention dropout).
        """
        bsz, length, dim = x.size()

        def split_heads(t):
            # [B, L, D] -> [B, H, L, D/H]
            return t.view(bsz, length, self.num_heads, self.head_dim).permute(0, 2, 1, 3)

        q = split_heads(self.q_proj(x))
        k = split_heads(self.k_proj(x))
        v = split_heads(self.v_proj(x))

        # Pre-softmax attention logits: [B, H, L, L]
        scores = torch.matmul(q, k.transpose(-2, -1)) / self.scale

        if mask is not None:
            # Broadcast the [B, L] key mask over heads and query positions.
            scores = scores.masked_fill(mask.unsqueeze(1).unsqueeze(2) == 0, float('-inf'))

        weights = self.dropout(F.softmax(scores, dim=-1))

        # Weighted sum of values, then merge the heads back: [B, L, D]
        context = torch.matmul(weights, v).permute(0, 2, 1, 3).contiguous()
        context = context.view(bsz, length, dim)

        return self.out_proj(context), weights

class PositionalEncoding(nn.Module):
    """Adds fixed sinusoidal position information to a sequence of embeddings.

    Follows the standard Transformer scheme: even embedding channels carry
    sine terms, odd channels carry cosine terms, with geometrically spaced
    wavelengths over the channel index.
    """

    def __init__(self, embed_dim, max_len=5000):
        super().__init__()

        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Inverse wavelengths for the even channel indices.
        inv_freq = torch.exp(torch.arange(0, embed_dim, 2).float() *
                             (-math.log(10000.0) / embed_dim))

        table = torch.zeros(max_len, embed_dim)
        angles = positions * inv_freq
        table[:, 0::2] = torch.sin(angles)
        table[:, 1::2] = torch.cos(angles)

        # Buffer (not a parameter): moves with .to()/.cuda(), no gradients.
        self.register_buffer('pe', table.unsqueeze(0))  # [1, max_len, embed_dim]

    def forward(self, x):
        """Add positional encodings to x of shape [batch, seq_len, embed_dim]."""
        return x + self.pe[:, :x.size(1), :]

class FeedForward(nn.Module):
    """Two-layer position-wise MLP: expand, GELU, dropout, project back.

    Applied independently at every sequence position; maps embed_dim ->
    hidden_dim -> embed_dim.
    """

    def __init__(self, embed_dim, hidden_dim, dropout=0.1):
        super().__init__()
        self.linear1 = nn.Linear(embed_dim, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, embed_dim)
        self.dropout = nn.Dropout(dropout)
        self.activation = nn.GELU()

    def forward(self, x):
        # Same pipeline as linear2(dropout(gelu(linear1(x)))), spelled out.
        expanded = self.activation(self.linear1(x))
        return self.linear2(self.dropout(expanded))

class AttentionMemory(nn.Module):
    """
    Attention-enhanced memory module for Neural-SLAM

    Maintains backward compatibility with LSTM interface while using modern attention mechanisms.
    Can be used as a drop-in replacement for nn.LSTM in the Neural-SLAM architecture.

    Architecture: a stack of ``num_layers`` post-norm Transformer-style layers
    (self-attention + feed-forward, each with a residual connection). When
    ``use_lstm_baseline`` is True, a parallel nn.LSTM pass over the raw input
    is blended into the attention output (fixed 0.7/0.3 weighting in forward)
    and supplies the LSTM-style hidden-state tuple returned to callers.
    """
    
    def __init__(self, input_size=512, hidden_size=512, num_heads=8, num_layers=2, 
                 dropout=0.1, use_lstm_baseline=True):
        super().__init__()
        
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.num_layers = num_layers
        self.use_lstm_baseline = use_lstm_baseline
        
        # Original LSTM for backward compatibility and residual connections
        if use_lstm_baseline:
            self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True)
        
        # Input projection to match hidden size if needed
        if input_size != hidden_size:
            self.input_proj = nn.Linear(input_size, hidden_size)
        else:
            self.input_proj = nn.Identity()
        
        # Positional encoding
        self.pos_encoding = PositionalEncoding(hidden_size)
        
        # Attention layers
        self.attention_layers = nn.ModuleList([
            MultiHeadAttention(hidden_size, num_heads, dropout)
            for _ in range(num_layers)
        ])
        
        # Feed-forward layers (2x expansion ratio)
        self.ff_layers = nn.ModuleList([
            FeedForward(hidden_size, hidden_size * 2, dropout)
            for _ in range(num_layers)
        ])
        
        # Layer normalization: *_1 follows attention, *_2 follows feed-forward
        # (post-norm, applied to input + sublayer output).
        self.layer_norms_1 = nn.ModuleList([
            nn.LayerNorm(hidden_size) for _ in range(num_layers)
        ])
        self.layer_norms_2 = nn.ModuleList([
            nn.LayerNorm(hidden_size) for _ in range(num_layers)
        ])
        
        # Output projection
        self.output_proj = nn.Linear(hidden_size, hidden_size)
        
        # Dropout
        self.dropout = nn.Dropout(dropout)
        
    def forward(self, input_seq, hidden_state=None, return_all_outputs=False):
        """
        Forward pass maintaining LSTM-compatible interface
        
        Args:
            input_seq: [batch_size, seq_len, input_size] or [batch_size, input_size] for single step
            hidden_state: LSTM-style hidden state tuple (h_0, c_0) for compatibility
            return_all_outputs: Whether to return all sequence outputs or just the last
            
        Returns:
            output: [batch_size, seq_len, hidden_size] or [batch_size, hidden_size]
            new_hidden: Updated hidden state tuple for LSTM compatibility

        NOTE(review): with the default ``return_all_outputs=False`` a multi-step
        sequence returns only the last timestep ([batch_size, hidden_size]),
        not the full [batch_size, seq_len, hidden_size] an nn.LSTM would —
        callers wanting true LSTM shapes must pass return_all_outputs=True.
        """
        
        # Handle single timestep input
        single_step = False
        if input_seq.dim() == 2:  # [batch_size, input_size]
            input_seq = input_seq.unsqueeze(1)  # [batch_size, 1, input_size]
            single_step = True
        
        batch_size, seq_len, _ = input_seq.size()
        
        # Input projection (identity when input_size == hidden_size)
        x = self.input_proj(input_seq)  # [batch_size, seq_len, hidden_size]
        
        # LSTM baseline processing. The LSTM consumes the *raw* input_seq, not
        # the projected x; its hidden tuple is what gets returned to callers.
        lstm_output = None
        if self.use_lstm_baseline:
            lstm_output, new_hidden = self.lstm(input_seq, hidden_state)
        else:
            # Create dummy (all-zero) hidden state for interface compatibility;
            # any incoming hidden_state is ignored in this mode.
            device = input_seq.device
            h_0 = torch.zeros(1, batch_size, self.hidden_size, device=device)
            c_0 = torch.zeros(1, batch_size, self.hidden_size, device=device)
            new_hidden = (h_0, c_0)
        
        # Add positional encoding
        x = self.pos_encoding(x)
        x = self.dropout(x)
        
        # Apply attention layers. No mask is passed, so attention is fully
        # bidirectional: every position attends to the whole sequence.
        for i in range(self.num_layers):
            # Multi-head self-attention
            attn_output, _ = self.attention_layers[i](x)
            x = self.layer_norms_1[i](x + attn_output)  # Residual connection
            
            # Feed-forward
            ff_output = self.ff_layers[i](x)
            x = self.layer_norms_2[i](x + ff_output)  # Residual connection
        
        # Combine with LSTM output if available
        if lstm_output is not None:
            # Weighted combination of LSTM and attention outputs
            alpha = 0.7  # Weight for attention output
            x = alpha * x + (1 - alpha) * lstm_output
        
        # Output projection
        output = self.output_proj(x)
        
        # Return format matching LSTM
        if single_step:
            output = output.squeeze(1)  # [batch_size, hidden_size]
        
        if not return_all_outputs and not single_step:
            output = output[:, -1, :]  # Return only last timestep
        
        return output, new_hidden
    
    def get_attention_weights(self, input_seq, layer_idx=0):
        """Get attention weights for visualization.

        Returns the [B, H, L, L] weights from layer ``layer_idx``, or None if
        layer_idx >= num_layers.

        NOTE(review): unlike forward(), this path skips self.dropout after the
        positional encoding, and the returned weights come from
        MultiHeadAttention *after* its attention dropout — call .eval() first
        for clean visualization. TODO confirm this asymmetry is intended.
        """
        if input_seq.dim() == 2:
            input_seq = input_seq.unsqueeze(1)
        
        x = self.input_proj(input_seq)
        x = self.pos_encoding(x)
        
        # Forward through layers up to the specified layer
        for i in range(min(layer_idx + 1, self.num_layers)):
            attn_output, attn_weights = self.attention_layers[i](x)
            if i == layer_idx:
                return attn_weights
            x = self.layer_norms_1[i](x + attn_output)
            ff_output = self.ff_layers[i](x)
            x = self.layer_norms_2[i](x + ff_output)
        
        return None

class CompatibleAttentionMemory(nn.Module):
    """
    Simplified attention memory module that can directly replace LSTM calls
    in the existing Neural-SLAM codebase with minimal changes.

    Mirrors the ``nn.LSTM(batch_first=True)`` calling convention:
    ``forward(input, hx)`` returns ``(output, (h_n, c_n))`` where ``output``
    covers every timestep of the input sequence.
    """

    def __init__(self, input_size, hidden_size, batch_first=True, num_heads=8):
        super().__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        # Kept for interface parity with nn.LSTM; the wrapped module always
        # treats input as batch-first.
        self.batch_first = batch_first

        # Core attention memory; the LSTM baseline is kept on so the returned
        # (h_n, c_n) tuple carries real recurrent state.
        self.attention_memory = AttentionMemory(
            input_size=input_size,
            hidden_size=hidden_size, 
            num_heads=num_heads,
            use_lstm_baseline=True
        )
        
    def forward(self, input, hx=None):
        """LSTM-compatible forward pass.

        Args:
            input: [batch, seq_len, input_size] (or [batch, input_size] for a
                single step).
            hx: optional (h_0, c_0) hidden-state tuple, as with nn.LSTM.

        Returns:
            (output, (h_n, c_n)) with output matching nn.LSTM's batch-first
            per-timestep shape [batch, seq_len, hidden_size].
        """
        # Bug fix: AttentionMemory defaults to return_all_outputs=False, which
        # collapses multi-step sequences to only the final timestep [B, H].
        # nn.LSTM returns outputs for *every* timestep, so request the full
        # sequence to make this class a true drop-in replacement.
        return self.attention_memory(input, hx, return_all_outputs=True)

# Factory function for easy integration
def create_attention_memory(input_size, hidden_size, **kwargs):
    """Build a CompatibleAttentionMemory with sensible defaults.

    Args:
        input_size: Feature dimension of the input sequence.
        hidden_size: Output/hidden feature dimension.
        **kwargs: Forwarded to CompatibleAttentionMemory (e.g. num_heads,
            batch_first).

    Returns:
        A CompatibleAttentionMemory instance ready to replace an nn.LSTM.
    """
    config = dict(kwargs)
    config["input_size"] = input_size
    config["hidden_size"] = hidden_size
    return CompatibleAttentionMemory(**config)
