import torch
import torch.nn.functional as F
from typing import Optional

class SelfAttentionV1(torch.nn.Module):
    """A basic implementation of the self-attention mechanism.

    The input is projected into query, key, and value matrices through
    learned weight matrices (stored as raw ``torch.nn.Parameter`` tensors),
    then combined via scaled dot-product attention.

    Args:
        hidden_dim (int): Dimension of the input hidden states
        d_out (int): Output dimension for the attention mechanism
    """
    def __init__(self, hidden_dim: int, d_out: int) -> None:
        super().__init__()
        # Raw weight matrices (no bias); randn init is kept for parity with
        # the original educational implementation.
        self.W_q = torch.nn.Parameter(torch.randn(hidden_dim, d_out))
        self.W_k = torch.nn.Parameter(torch.randn(hidden_dim, d_out))
        self.W_v = torch.nn.Parameter(torch.randn(hidden_dim, d_out))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass of the self-attention mechanism.

        Args:
            x (torch.Tensor): Input tensor of shape (seq_len, hidden_dim)

        Returns:
            torch.Tensor: Output tensor of shape (seq_len, d_out)
        """
        query = x @ self.W_q
        key = x @ self.W_k
        value = x @ self.W_v

        attention_scores = query @ key.T
        # Scale by sqrt(d_k) with a plain Python float instead of
        # torch.sqrt(torch.tensor(...)): avoids allocating a tensor on every
        # forward pass and an implicit int->float dtype promotion.
        attention_weight = F.softmax(attention_scores / key.shape[-1] ** 0.5, dim=-1)
        return attention_weight @ value

class SelfAttentionV2(torch.nn.Module):
    """An improved version of self-attention with dropout and optional bias.

    This implementation uses ``torch.nn.Linear`` projections (giving proper
    default initialization), adds dropout on the attention weights for
    regularization, and supports optional bias terms.

    Args:
        hidden_dim (int): Dimension of the input hidden states
        d_out (int): Output dimension for the attention mechanism
        dropout (float, optional): Dropout probability applied to the
            attention weights. Defaults to 0.0
        qkv_bias (bool, optional): Whether to include bias terms in the
            Q, K, V projections. Defaults to False
    """
    def __init__(self, hidden_dim: int, d_out: int, dropout: float = 0.0, qkv_bias: bool = False) -> None:
        super().__init__()
        self.W_q = torch.nn.Linear(hidden_dim, d_out, bias=qkv_bias)
        self.W_k = torch.nn.Linear(hidden_dim, d_out, bias=qkv_bias)
        self.W_v = torch.nn.Linear(hidden_dim, d_out, bias=qkv_bias)
        self.dropout = torch.nn.Dropout(dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass of the improved self-attention mechanism.

        Args:
            x (torch.Tensor): Input tensor of shape (seq_len, hidden_dim)

        Returns:
            torch.Tensor: Output tensor of shape (seq_len, d_out)
        """
        query = self.W_q(x)
        key = self.W_k(x)
        value = self.W_v(x)

        attention_scores = query @ key.T
        # Scale by sqrt(d_k) with a plain Python float instead of
        # torch.sqrt(torch.tensor(...)): avoids a per-call tensor allocation
        # and an implicit int->float dtype promotion.
        attention_weight = F.softmax(attention_scores / key.shape[-1] ** 0.5, dim=-1)
        # Dropout on the attention weights (no-op in eval mode or p=0).
        attention_weight = self.dropout(attention_weight)
        return attention_weight @ value

class CausalAttention(torch.nn.Module):
    """A causal self-attention mechanism that prevents attending to future tokens.

    An upper-triangular mask is registered as a (non-trainable) buffer and
    applied to the attention scores so each position can only attend to
    itself and earlier positions, making the module suitable for
    autoregressive models. The most recent attention weights are kept on
    ``self.attention_weight`` for inspection.

    Args:
        hidden_dim (int): Dimension of the input hidden states
        d_out (int): Output dimension for the attention mechanism
        context_length (int): Maximum sequence length for the causal mask
        dropout (float, optional): Dropout probability applied to the
            attention weights. Defaults to 0.0
        qkv_bias (bool, optional): Whether to include bias terms in the
            Q, K, V projections. Defaults to False
    """
    def __init__(self, hidden_dim: int, d_out: int, context_length: int, dropout: float = 0.0, qkv_bias: bool = False) -> None:
        super().__init__()
        self.W_q = torch.nn.Linear(hidden_dim, d_out, bias=qkv_bias)
        self.W_k = torch.nn.Linear(hidden_dim, d_out, bias=qkv_bias)
        self.W_v = torch.nn.Linear(hidden_dim, d_out, bias=qkv_bias)
        self.dropout = torch.nn.Dropout(dropout)
        # Buffer (not a parameter): moves with .to(device) but is untrained.
        # mask[i, j] == 1 where j > i, i.e. the future positions to hide.
        self.register_buffer('mask', torch.triu(torch.ones(context_length, context_length), diagonal=1))
        # Last computed attention weights, exposed for debugging/visualization.
        self.attention_weight: Optional[torch.Tensor] = None

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass of the causal attention mechanism.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, seq_len,
                hidden_dim), or (seq_len, hidden_dim) for a single unbatched
                sequence.

        Returns:
            torch.Tensor: Output tensor of shape (batch_size, seq_len, d_out)
                (or (seq_len, d_out) for unbatched input)
        """
        # Support both batched (3-D) and unbatched (2-D) inputs.
        num_tokens = x.shape[1] if x.ndim == 3 else x.shape[0]
        query = self.W_q(x)
        key = self.W_k(x)
        value = self.W_v(x)

        attention_scores = query @ key.transpose(-2, -1)
        # Hide future positions with -inf so softmax assigns them weight 0.
        attention_scores = attention_scores.masked_fill(self.mask.bool()[:num_tokens, :num_tokens], -torch.inf)
        # Scale by sqrt(d_k) with a plain Python float instead of
        # torch.sqrt(torch.tensor(...)): avoids a per-call tensor allocation
        # and an implicit int->float dtype promotion.
        attention_weight = F.softmax(attention_scores / key.shape[-1] ** 0.5, dim=-1)
        attention_weight = self.dropout(attention_weight)
        self.attention_weight = attention_weight
        attention_vec = attention_weight @ value
        return attention_vec

class MultiHeadAttentionWrapper(torch.nn.Module):
    """Multi-head attention built from independent causal attention heads.

    Runs ``num_heads`` separate ``CausalAttention`` modules on the same
    input and concatenates their outputs along the last (feature) dimension,
    so the result has ``num_heads * d_out`` features.

    Args:
        hidden_dim (int): Dimension of the input hidden states
        d_out (int): Output dimension for each attention head
        context_length (int): Maximum sequence length for the causal mask
        num_heads (int): Number of parallel attention heads
        dropout (float, optional): Dropout probability. Defaults to 0.0
        qkv_bias (bool, optional): Whether to include bias terms in Q,K,V projections. Defaults to False
    """
    def __init__(self, hidden_dim: int, d_out: int, context_length: int, num_heads: int, dropout: float = 0.0, qkv_bias: bool = False) -> None:
        super().__init__()
        heads = []
        for _ in range(num_heads):
            heads.append(CausalAttention(hidden_dim, d_out, context_length, dropout, qkv_bias))
        # ModuleList registers each head so its parameters are tracked.
        self.heads = torch.nn.ModuleList(heads)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply every head to ``x`` and concatenate the results.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, seq_len, hidden_dim)

        Returns:
            torch.Tensor: Concatenated output from all attention heads
        """
        head_outputs = []
        for head in self.heads:
            head_outputs.append(head(x))
        return torch.cat(head_outputs, dim=-1)

class MultiHeadAttention(torch.nn.Module):
    """An efficient multi-head causal attention mechanism.

    Unlike the wrapper approach, this computes all heads with single Q/K/V
    projections: the projected tensors are reshaped to split the ``d_out``
    dimension into ``num_heads`` heads of size ``head_dim``, attention is
    computed per head in parallel, and the heads are re-merged before a
    final output projection.

    Args:
        hidden_dim (int): Dimension of the input hidden states
        d_out (int): Total output dimension (split evenly across heads)
        context_length (int): Maximum sequence length for the causal mask
        num_heads (int): Number of parallel attention heads
        dropout (float, optional): Dropout probability applied to the
            attention weights. Defaults to 0.0
        qkv_bias (bool, optional): Whether to include bias terms in Q,K,V
            projections. Defaults to False

    Raises:
        ValueError: If ``d_out`` is not divisible by ``num_heads``.
    """
    def __init__(self, hidden_dim: int, d_out: int, context_length: int, num_heads: int,
                 dropout: float = 0.0, qkv_bias: bool = False) -> None:
        super().__init__()
        # Validate with an explicit exception: `assert` is stripped under -O,
        # which would silently allow a broken head split.
        if d_out % num_heads != 0:
            raise ValueError("d_out must be divisible by num_heads")
        self.W_q = torch.nn.Linear(hidden_dim, d_out, bias=qkv_bias)
        self.W_k = torch.nn.Linear(hidden_dim, d_out, bias=qkv_bias)
        self.W_v = torch.nn.Linear(hidden_dim, d_out, bias=qkv_bias)
        self.dropout = torch.nn.Dropout(dropout)
        # Final projection mixing information across heads (always applied).
        self.output_proj = torch.nn.Linear(d_out, d_out)
        self.d_out = d_out
        self.num_heads = num_heads
        self.head_dim = d_out // num_heads
        # Upper-triangular causal mask buffer: mask[i, j] == 1 where j > i.
        self.register_buffer('mask', torch.triu(torch.ones(context_length, context_length),
                                                diagonal=1))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass of the multi-head attention mechanism.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, seq_len, hidden_dim)

        Returns:
            torch.Tensor: Output tensor of shape (batch_size, seq_len, d_out)
        """
        batch_size, num_tokens, _ = x.shape
        query = self.W_q(x)
        key = self.W_k(x)
        value = self.W_v(x)

        # Split d_out into heads: (b, t, d_out) -> (b, t, h, head_dim).
        query = query.view(batch_size, -1, self.num_heads, self.head_dim)
        key = key.view(batch_size, -1, self.num_heads, self.head_dim)
        value = value.view(batch_size, -1, self.num_heads, self.head_dim)

        # Move heads before tokens: (b, h, t, head_dim), so attention is
        # computed independently per head by the batched matmuls below.
        query = query.transpose(1, 2)
        key = key.transpose(1, 2)
        value = value.transpose(1, 2)

        attention_scores = query @ key.transpose(-2, -1)
        # Hide future positions with -inf so softmax assigns them weight 0;
        # the (t, t) mask broadcasts over the batch and head dimensions.
        attention_scores = attention_scores.masked_fill(self.mask.bool()[:num_tokens, :num_tokens], -torch.inf)
        # Scale by sqrt(head_dim) with a plain Python float instead of
        # torch.sqrt(torch.tensor(...)): avoids a per-call tensor allocation
        # and an implicit int->float dtype promotion.
        attention_weight = F.softmax(attention_scores / self.head_dim ** 0.5, dim=-1)
        attention_weight = self.dropout(attention_weight)
        attention_vec = attention_weight @ value
        # Merge heads back: (b, h, t, head_dim) -> (b, t, d_out).
        attention_vec = attention_vec.transpose(1, 2)
        attention_vec = attention_vec.contiguous().view(batch_size, -1, self.d_out)
        output = self.output_proj(attention_vec)
        return output
