"""
Attention mechanism handlers for different model architectures.

This module contains handler classes for different types of attention mechanisms,
helping to abstract away the differences between various model implementations.
"""

from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F

from continuallearning.utils.logging import get_logger

logger = get_logger(__name__)


class AttentionMechanism:
    """
    Base class for different types of attention mechanisms.

    Subclasses encapsulate the model-specific details of injecting prefix
    key/value vectors into a particular attention implementation, so callers
    can treat all supported architectures uniformly.
    """

    def __init__(self, module: nn.Module):
        """
        Wrap an attention module.

        Args:
            module: The attention module this handler operates on.
        """
        self.module = module

    def apply_prefix(
        self,
        hidden_states: torch.Tensor,
        key_prefix: torch.Tensor,
        value_prefix: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> torch.Tensor:
        """
        Apply prefix vectors to the wrapped attention mechanism.

        Args:
            hidden_states: Input hidden states.
            key_prefix: Prefix vectors prepended to the key states.
            value_prefix: Prefix vectors prepended to the value states.
            attention_mask: Optional attention mask.
            **kwargs: Additional implementation-specific arguments.

        Returns:
            torch.Tensor: Output hidden states.

        Raises:
            NotImplementedError: Always; subclasses must override this.
        """
        raise NotImplementedError("Subclasses must implement this method")

    @staticmethod
    def create(module: nn.Module) -> "AttentionMechanism":
        """
        Build the appropriate handler for the given attention module.

        Handlers are probed in priority order; the first whose ``can_handle``
        accepts the module wins. ``DefaultAttentionMechanism`` always matches,
        so the error below is effectively unreachable unless the candidate
        list is edited.

        Args:
            module: Attention module to wrap.

        Returns:
            AttentionMechanism: Handler instance for ``module``.

        Raises:
            ValueError: If no candidate handler accepts the module.
        """
        candidates = (
            HFAttentionMechanism,
            PyTorchAttentionMechanism,
            GPTNeoXAttentionMechanism,
            DefaultAttentionMechanism,
        )
        for candidate in candidates:
            if candidate.can_handle(module):
                return candidate(module)

        raise ValueError(
            f"Cannot find suitable attention mechanism handler for {type(module).__name__}"
        )


class HFAttentionMechanism(AttentionMechanism):
    """Handler for Hugging Face style attention modules.

    Matches modules with separate ``query``/``key``/``value`` projection
    layers (e.g. BERT-style self-attention).
    """

    @staticmethod
    def can_handle(module: nn.Module) -> bool:
        """Check if module exposes separate query/key/value projections."""
        return (
            hasattr(module, "query")
            and hasattr(module, "key")
            and hasattr(module, "value")
        )

    def apply_prefix(
        self,
        hidden_states: torch.Tensor,
        key_prefix: torch.Tensor,
        value_prefix: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> torch.Tensor:
        """Apply prefix to HF-style attention.

        Args:
            hidden_states: Input of shape [batch, seq, hidden].
            key_prefix: Prefix keys, shape [batch, prefix_len, heads, head_dim].
            value_prefix: Prefix values, same shape convention as key_prefix.
            attention_mask: Optional additive mask (0 = attend, large
                negative = blocked).
            **kwargs: Ignored extra arguments.

        Returns:
            torch.Tensor: Output hidden states, shape [batch, seq, hidden].
        """
        batch_size, seq_len = hidden_states.size()[:2]
        # Head layout is taken from the prefix tensors, not the module config.
        num_heads = key_prefix.size(2)
        head_dim = key_prefix.size(3)

        # Project hidden states to query, key, value
        query = self.module.query(hidden_states)
        key = self.module.key(hidden_states)
        value = self.module.value(hidden_states)

        # Reshape to [batch, seq, num_heads, head_dim]
        query = query.view(batch_size, -1, num_heads, head_dim)
        key = key.view(batch_size, -1, num_heads, head_dim)
        value = value.view(batch_size, -1, num_heads, head_dim)

        # Prepend prefixes along the sequence dimension
        key = torch.cat([key_prefix, key], dim=1)
        value = torch.cat([value_prefix, value], dim=1)

        return self._compute_attention(query, key, value, attention_mask)

    def _compute_attention(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Compute scaled dot-product attention with prefixed key/value.

        Args:
            query: [batch, seq, heads, head_dim].
            key: [batch, kv_seq, heads, head_dim] (kv_seq includes prefix).
            value: same shape as key.
            attention_mask: Optional additive mask; a 4-d
                [batch, 1, seq, seq] mask is extended over prefix positions.

        Returns:
            torch.Tensor: Context of shape [batch, seq, heads * head_dim].
        """
        batch_size, seq_len = query.size(0), query.size(1)
        kv_seq_len = key.size(1)  # Includes prefix length
        num_heads = query.size(2)
        head_dim = query.size(3)

        # [batch, seq, heads, head_dim] -> [batch, heads, seq, head_dim]
        query = query.transpose(1, 2)
        key = key.transpose(1, 2)
        value = value.transpose(1, 2)

        # Scaled dot-product scores: [batch, heads, seq, kv_seq]
        attention_scores = torch.matmul(query, key.transpose(-1, -2))
        attention_scores = attention_scores / (head_dim**0.5)

        if attention_mask is not None:
            # Extend mask to include prefix positions
            prefix_len = kv_seq_len - seq_len
            if prefix_len > 0:
                if attention_mask.dim() == 4:  # [batch, 1, seq, seq]
                    # The mask is ADDED to the logits, so "attend freely" must
                    # be 0 (not 1, which would bias every prefix logit by +1).
                    prefix_mask = torch.zeros(
                        (batch_size, 1, seq_len, prefix_len),
                        device=attention_mask.device,
                        dtype=attention_mask.dtype,
                    )
                    # Allow unrestricted attention to prefix tokens
                    attention_mask = torch.cat(
                        [prefix_mask, attention_mask], dim=-1
                    )
                # For other mask formats, add extension logic as needed

            attention_scores = attention_scores + attention_mask

        attention_probs = F.softmax(attention_scores, dim=-1)

        context = torch.matmul(attention_probs, value)

        # [batch, heads, seq, head_dim] -> [batch, seq, heads, head_dim]
        context = context.transpose(1, 2).contiguous()

        # Merge heads back into [batch, seq, hidden_size]
        hidden_size = num_heads * head_dim
        context = context.view(batch_size, seq_len, hidden_size)

        # Apply output projection if present (either naming convention)
        output_proj = getattr(self.module, "output_proj", None)
        if output_proj is None:
            output_proj = getattr(self.module, "out_proj", None)
        if output_proj is not None:
            context = output_proj(context)

        return context


class PyTorchAttentionMechanism(AttentionMechanism):
    """Handler for PyTorch style attention modules.

    Matches modules with ``q_proj``/``k_proj``/``v_proj`` projection layers
    (e.g. LLaMA/OPT-style attention).
    """

    @staticmethod
    def can_handle(module: nn.Module) -> bool:
        """Check if module exposes q_proj/k_proj/v_proj projections."""
        return (
            hasattr(module, "q_proj")
            and hasattr(module, "k_proj")
            and hasattr(module, "v_proj")
        )

    def apply_prefix(
        self,
        hidden_states: torch.Tensor,
        key_prefix: torch.Tensor,
        value_prefix: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> torch.Tensor:
        """Apply prefix to PyTorch-style attention.

        Args:
            hidden_states: Input of shape [batch, seq, hidden].
            key_prefix: Prefix keys, shape [batch, prefix_len, heads, head_dim].
            value_prefix: Prefix values, same shape convention as key_prefix.
            attention_mask: Optional additive mask.
            **kwargs: Ignored extra arguments.

        Returns:
            torch.Tensor: Output hidden states, shape [batch, seq, hidden].
        """
        batch_size, seq_len = hidden_states.size()[:2]
        # Head layout is taken from the prefix tensors, not the module config.
        num_heads = key_prefix.size(2)
        head_dim = key_prefix.size(3)

        # Project hidden states to query, key, value
        query = self.module.q_proj(hidden_states)
        key = self.module.k_proj(hidden_states)
        value = self.module.v_proj(hidden_states)

        # Reshape to [batch, seq, num_heads, head_dim]
        query = query.view(batch_size, -1, num_heads, head_dim)
        key = key.view(batch_size, -1, num_heads, head_dim)
        value = value.view(batch_size, -1, num_heads, head_dim)

        # Prepend prefixes along the sequence dimension
        key = torch.cat([key_prefix, key], dim=1)
        value = torch.cat([value_prefix, value], dim=1)

        # Compute attention
        attention_output = self._compute_attention(query, key, value, attention_mask)

        # Apply output projection
        if hasattr(self.module, "out_proj"):
            return self.module.out_proj(attention_output)

        return attention_output

    def _compute_attention(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Compute scaled dot-product attention with prefixed key/value.

        Args:
            query: [batch, seq, heads, head_dim].
            key: [batch, kv_seq, heads, head_dim] (kv_seq includes prefix).
            value: same shape as key.
            attention_mask: Optional additive mask; a 4-d
                [batch, 1, seq, seq] mask is extended over prefix positions.

        Returns:
            torch.Tensor: Context of shape [batch, seq, heads * head_dim].
        """
        batch_size, seq_len = query.size(0), query.size(1)
        kv_seq_len = key.size(1)  # Includes prefix length
        num_heads = query.size(2)
        head_dim = query.size(3)

        # Transpose for attention calculation
        query = query.transpose(1, 2)  # [batch, heads, seq, head_dim]
        key = key.transpose(1, 2)  # [batch, heads, kv_seq, head_dim]
        value = value.transpose(1, 2)  # [batch, heads, kv_seq, head_dim]

        # Compute attention scores
        attention_scores = torch.matmul(query, key.transpose(-1, -2))
        attention_scores = attention_scores / (head_dim**0.5)

        # Apply attention mask if provided
        if attention_mask is not None:
            # The scores' last dim includes the prefix, so a per-position
            # 4-d mask must be zero-padded over the prefix columns (0 in an
            # additive mask means "attend"); otherwise the addition below
            # fails to broadcast.
            prefix_len = kv_seq_len - seq_len
            if prefix_len > 0 and attention_mask.dim() == 4:
                prefix_mask = torch.zeros(
                    (batch_size, 1, seq_len, prefix_len),
                    device=attention_mask.device,
                    dtype=attention_mask.dtype,
                )
                attention_mask = torch.cat([prefix_mask, attention_mask], dim=-1)

            attention_scores = attention_scores + attention_mask

        # Apply softmax
        attention_probs = F.softmax(attention_scores, dim=-1)

        # Apply attention to value
        context = torch.matmul(attention_probs, value)

        # Transpose back and merge heads into [batch, seq, hidden]
        context = context.transpose(1, 2).contiguous()
        context = context.view(batch_size, seq_len, num_heads * head_dim)

        return context


class GPTNeoXAttentionMechanism(AttentionMechanism):
    """Handler for GPT-NeoX / GPT-2 style attention modules with a fused
    query-key-value projection (``query_key_value`` or ``c_attn``)."""

    @staticmethod
    def can_handle(module: nn.Module) -> bool:
        """Check if module uses a fused QKV projection."""
        return hasattr(module, "query_key_value") or (
            hasattr(module, "c_attn") and not hasattr(module, "key")
        )

    def apply_prefix(
        self,
        hidden_states: torch.Tensor,
        key_prefix: torch.Tensor,
        value_prefix: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> torch.Tensor:
        """Apply prefix to GPT-NeoX/GPT2 style attention.

        Args:
            hidden_states: Input of shape [batch, seq, hidden].
            key_prefix: Prefix keys, shape [batch, prefix_len, heads, head_dim].
            value_prefix: Prefix values, same shape convention as key_prefix.
            attention_mask: Optional additive mask.
            **kwargs: Ignored extra arguments.

        Returns:
            torch.Tensor: Output hidden states, shape [batch, seq, hidden].
        """
        batch_size, seq_len = hidden_states.size()[:2]
        num_heads = key_prefix.size(2)
        head_dim = key_prefix.size(3)

        # GPT-NeoX and GPT-2 both fuse QKV, but with different layouts.
        is_neox = hasattr(self.module, "query_key_value")
        qkv_proj = self.module.query_key_value if is_neox else self.module.c_attn

        # Apply fused QKV projection: [batch, seq, 3 * hidden]
        qkv = qkv_proj(hidden_states)

        if is_neox:
            # GPT-NeoX packs q/k/v per head: [batch, seq, heads, 3 * head_dim].
            qkv = qkv.view(batch_size, seq_len, num_heads, 3 * head_dim)
            query, key, value = qkv.chunk(3, dim=-1)
        else:
            # GPT-2's c_attn concatenates full q, k, v along the hidden dim:
            # [batch, seq, 3 * hidden] -> [batch, seq, 3, heads, head_dim].
            qkv = qkv.view(batch_size, seq_len, 3, num_heads, head_dim)
            query, key, value = qkv.unbind(dim=2)

        # Prepend prefixes along the sequence dimension
        key = torch.cat([key_prefix, key], dim=1)
        value = torch.cat([value_prefix, value], dim=1)

        # Compute attention and apply output projection
        attention_output = self._compute_attention(query, key, value, attention_mask)

        # Apply output projection if present (GPT-2 vs GPT-NeoX naming)
        if hasattr(self.module, "c_proj"):
            return self.module.c_proj(attention_output)
        elif hasattr(self.module, "dense"):
            return self.module.dense(attention_output)

        return attention_output

    def _compute_attention(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Compute scaled dot-product attention with prefixed key/value.

        Args:
            query: [batch, seq, heads, head_dim].
            key: [batch, kv_seq, heads, head_dim] (kv_seq includes prefix).
            value: same shape as key.
            attention_mask: Optional additive mask; a 4-d
                [batch, 1, seq, seq] mask is extended over prefix positions.

        Returns:
            torch.Tensor: Context of shape [batch, seq, heads * head_dim].
        """
        batch_size, seq_len = query.size(0), query.size(1)
        kv_seq_len = key.size(1)  # Includes prefix length
        num_heads = query.size(2)
        head_dim = query.size(3)

        # Transpose for attention calculation
        query = query.transpose(1, 2)  # [batch, heads, seq, head_dim]
        key = key.transpose(1, 2)  # [batch, heads, kv_seq, head_dim]
        value = value.transpose(1, 2)  # [batch, heads, kv_seq, head_dim]

        # Compute attention
        attention_scores = torch.matmul(query, key.transpose(-1, -2))
        attention_scores = attention_scores / (head_dim**0.5)

        # Apply mask if provided
        if attention_mask is not None:
            # Zero-pad a 4-d mask over the prefix columns (0 in an additive
            # mask means "attend") so it matches the prefixed kv length.
            prefix_len = kv_seq_len - seq_len
            if prefix_len > 0 and attention_mask.dim() == 4:
                prefix_mask = torch.zeros(
                    (batch_size, 1, seq_len, prefix_len),
                    device=attention_mask.device,
                    dtype=attention_mask.dtype,
                )
                attention_mask = torch.cat([prefix_mask, attention_mask], dim=-1)

            attention_scores = attention_scores + attention_mask

        # Apply softmax
        attention_probs = F.softmax(attention_scores, dim=-1)

        # Apply attention to value
        context = torch.matmul(attention_probs, value)

        # Reshape to output format [batch, seq, hidden]
        context = context.transpose(1, 2).contiguous()
        context = context.view(batch_size, seq_len, num_heads * head_dim)

        return context


class DefaultAttentionMechanism(AttentionMechanism):
    """Fallback handler for unknown attention modules.

    NOTE: this handler cannot inject the prefix vectors; it simply delegates
    to the wrapped module, so ``key_prefix`` and ``value_prefix`` are ignored.
    """

    @staticmethod
    def can_handle(module: nn.Module) -> bool:
        """Always returns True as this is the fallback handler."""
        return True

    def apply_prefix(
        self,
        hidden_states: torch.Tensor,
        key_prefix: torch.Tensor,
        value_prefix: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> torch.Tensor:
        """Delegate to the wrapped module's own forward pass.

        Args:
            hidden_states: Input hidden states.
            key_prefix: Ignored (cannot be injected into an unknown module).
            value_prefix: Ignored (cannot be injected into an unknown module).
            attention_mask: Optional attention mask, forwarded as-is.
            **kwargs: Forwarded to the wrapped module.

        Returns:
            torch.Tensor: The wrapped module's output.
        """
        # Lazy %-style args so the message is only formatted when emitted.
        logger.warning(
            "Using fallback attention mechanism for %s. This may not work correctly.",
            type(self.module).__name__,
        )

        # Call the module itself (not .forward) so nn.Module.__call__ runs
        # and any registered forward/backward hooks still fire.
        return self.module(hidden_states, attention_mask=attention_mask, **kwargs)
