"""
Prefix-tuning hooks implementation for parameter-efficient fine-tuning.

This module provides hooks for modifying attention mechanisms in transformer models
to incorporate learned prefix vectors during inference.
"""

from typing import Dict, List, Optional, Any, Union, Tuple

import torch
import torch.nn as nn

from continuallearning.models.pefts.hooks._base_hook import (
    BaseHook,
    HookType,
    HookAttachError,
)
from continuallearning.models.pefts.hooks.hook_manager.hook_manager import (
    UnifiedHookManager,
)
from continuallearning.models.pefts.common.config import PrefixConfig
from continuallearning.interfaces.models.hook_interfaces import HookFactoryInterface
from continuallearning.registry import HOOK_FACTORY_REGISTRY, HOOK_MANAGER_REGISTRY
from continuallearning.utils.logging import get_logger

logger = get_logger(__name__)


class PrefixLayerHook(BaseHook):
    """
    Hook for implementing prefix-tuning in transformer attention layers.

    This hook owns the learned prefix parameters for one attention layer and
    replaces the layer's forward pass so that key/value prefix vectors are
    injected into the attention computation via the detected attention
    mechanism.

    Args:
        module: Transformer attention module to hook
        config: Prefix tuning configuration

    Raises:
        HookAttachError: If no supported attention mechanism is detected
            on ``module``.
        ValueError: If ``config.token_dim`` is not divisible by
            ``config.num_attention_heads``.
    """

    def __init__(
        self,
        module: nn.Module,
        config: PrefixConfig,
    ):
        """Initialize prefix attention hook."""
        super().__init__(module=module, hook_type=HookType.FORWARD_REPLACE)
        self.config = config

        # Store layer index for multi-layer models
        self.layer_idx = getattr(config, "layer_idx", 0)

        # Attention geometry derived from the config.
        self.hidden_size = config.token_dim
        self.num_attention_heads = config.num_attention_heads
        if self.hidden_size % self.num_attention_heads != 0:
            # Integer division below would silently truncate head_dim and
            # produce mis-sized prefixes; fail fast instead.
            raise ValueError(
                f"token_dim ({self.hidden_size}) must be divisible by "
                f"num_attention_heads ({self.num_attention_heads})"
            )
        self.head_dim = self.hidden_size // self.num_attention_heads
        self.prefix_length = config.num_tokens

        # Prefix re-parameterization settings.
        self.use_prefix_mlp = config.use_prefix_mlp
        self.prefix_mlp_hidden_size = config.prefix_mlp_hidden_size
        self.prefix_dropout = config.dropout
        self.task_specific = getattr(config, "task_specific", True)
        self.num_tasks = getattr(config, "num_tasks", 1)

        # Default to task 0 until explicitly set (see set_task on the manager).
        self.current_task_id = 0

        # Initialize prefix parameters
        self._initialize_prefix_parameters()

        # Import here to avoid circular imports
        from continuallearning.models.pefts.prompt.hooks.attention_mechanisms import (
            AttentionMechanism,
        )

        # Detect attention mechanism; chain the original error so the root
        # cause is visible in the traceback.
        try:
            self.attention_mechanism = AttentionMechanism.create(module)
        except ValueError as e:
            raise HookAttachError(f"Cannot attach prefix hook: {str(e)}") from e

        logger.debug(
            f"Attached prefix hook to {type(module).__name__} "
            f"using {self.attention_mechanism.__class__.__name__}"
        )

    def _initialize_prefix_parameters(self) -> None:
        """Initialize prefix parameters for the layer."""
        # Flattened per-head dimension; equals hidden_size after the
        # divisibility check in __init__.
        total_prefix_dim = self.num_attention_heads * self.head_dim

        if self.use_prefix_mlp:
            # Learned input tokens, re-parameterized through an MLP.
            # Shape: [num_tasks, 2 (key/value), prefix_length, total_prefix_dim]
            self.prefix_input_tokens = nn.Parameter(
                torch.randn(
                    self.num_tasks,
                    2,  # key and value
                    self.prefix_length,
                    total_prefix_dim,
                )
            )

            # Token-wise MLP applied over the last dimension. The key/value
            # split already lives in dim 1 of prefix_input_tokens, so the MLP
            # maps each token back to total_prefix_dim. (The previous
            # 2*total_prefix_dim output could never be reshaped back to
            # per-head key/value prefixes.)
            self.prefix_mlp = nn.Sequential(
                nn.Linear(total_prefix_dim, self.prefix_mlp_hidden_size),
                nn.Tanh(),
                nn.Linear(self.prefix_mlp_hidden_size, total_prefix_dim),
            )

            # Dropout applied to the generated prefixes during training.
            self.prefix_dropout_layer = nn.Dropout(self.prefix_dropout)

            # Initialize weights
            self._init_weights()
        else:
            # Directly learn prefix vectors without MLP transformation.
            # Shape: [num_tasks, 2 (key/value), prefix_length, heads, head_dim]
            self.prefix_tokens = nn.Parameter(
                torch.randn(
                    self.num_tasks,
                    2,  # key and value
                    self.prefix_length,
                    self.num_attention_heads,
                    self.head_dim,
                )
            )

            # Initialize with small random values
            nn.init.uniform_(self.prefix_tokens, -0.1, 0.1)

    def _init_weights(self) -> None:
        """Initialize the weights of MLP modules."""
        # `submodule` rather than `module` to avoid shadowing the
        # constructor's parameter name in readers' minds.
        for submodule in self.prefix_mlp.modules():
            if isinstance(submodule, nn.Linear):
                submodule.weight.data.normal_(mean=0.0, std=0.02)
                if submodule.bias is not None:
                    submodule.bias.data.zero_()

    def get_prefix_vectors(
        self, batch_size: int, task_id: Optional[int] = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Get prefix vectors for the current layer.

        Args:
            batch_size: Batch size for the current input
            task_id: Optional task ID (defaults to current task)

        Returns:
            Tuple[torch.Tensor, torch.Tensor]: Key and value prefix vectors
                with shape [batch_size, prefix_length, num_heads, head_dim].
                On internal failure, zero tensors of the same shape are
                returned as a best-effort fallback.
        """
        # Use the current task ID if none provided
        if task_id is None:
            task_id = self.current_task_id

        # Shared prefixes always use slot 0; task-specific ones wrap around
        # so an out-of-range id never indexes past num_tasks.
        if not self.task_specific:
            task_id = 0
        else:
            task_id = task_id % self.num_tasks

        try:
            if self.use_prefix_mlp:
                # Input tokens for the selected task.
                # Shape: [2, prefix_length, total_prefix_dim]
                input_tokens = self.prefix_input_tokens[task_id]

                # nn.Linear operates on the last dimension, so the MLP is
                # applied token-wise without flattening. (The previous
                # full-flatten fed 2*prefix_length*total_prefix_dim features
                # into a Linear expecting total_prefix_dim and always failed,
                # silently degrading to the zero-prefix fallback below.)
                prefixes = self.prefix_mlp(input_tokens)

                # Apply dropout
                prefixes = self.prefix_dropout_layer(prefixes)

                # Split the flat feature dim into heads.
                # Shape: [2, prefix_length, num_heads, head_dim]
                prefixes = prefixes.view(
                    2,
                    self.prefix_length,
                    self.num_attention_heads,
                    self.head_dim,
                )

                # Shape: [prefix_length, num_heads, head_dim]
                key_prefix = prefixes[0]
                value_prefix = prefixes[1]

            else:
                # Directly use the learned prefix tokens
                # Shape: [prefix_length, num_heads, head_dim]
                key_prefix = self.prefix_tokens[task_id, 0]
                value_prefix = self.prefix_tokens[task_id, 1]

            # Broadcast across the batch (expand is a zero-copy view).
            # Shape: [batch_size, prefix_length, num_heads, head_dim]
            key_prefix = key_prefix.expand(batch_size, -1, -1, -1)
            value_prefix = value_prefix.expand(batch_size, -1, -1, -1)

            return key_prefix, value_prefix

        except Exception:
            logger.exception("Error getting prefix vectors")
            # Best-effort fallback: zero prefixes on the same device/dtype
            # as the learned parameters (a plain torch.zeros would land on
            # CPU/float32 and break CUDA or half-precision models).
            reference = (
                self.prefix_input_tokens
                if self.use_prefix_mlp
                else self.prefix_tokens
            )
            empty_shape = (
                batch_size,
                self.prefix_length,
                self.num_attention_heads,
                self.head_dim,
            )
            zeros = torch.zeros(
                empty_shape, device=reference.device, dtype=reference.dtype
            )
            return zeros, zeros.clone()

    def _extract_hidden_states(
        self,
        args: Tuple[Any, ...],
        kwargs: Dict[str, Any],
    ) -> Optional[torch.Tensor]:
        """
        Extract hidden states from function arguments.

        Args:
            args: Positional arguments to the forward method
            kwargs: Keyword arguments to the forward method

        Returns:
            torch.Tensor: The input hidden states tensor or None if not found
        """
        # Try different common keyword argument names
        for key in ["hidden_states", "input", "inputs", "x", "features", "states"]:
            if key in kwargs and isinstance(kwargs[key], torch.Tensor):
                return kwargs[key]

        # No matching keywords, try positional arguments
        if len(args) > 0:
            # Most common case: first argument is hidden states
            if isinstance(args[0], torch.Tensor):
                return args[0]
            # Handle tuple or list of tensors (common in some models)
            elif isinstance(args[0], (tuple, list)) and len(args[0]) > 0:
                if isinstance(args[0][0], torch.Tensor):
                    return args[0][0]

        # Special case: check for 'attention_inputs' pattern in some models
        if "attention_inputs" in kwargs and isinstance(
            kwargs["attention_inputs"], tuple
        ):
            for item in kwargs["attention_inputs"]:
                if isinstance(item, torch.Tensor):
                    return item

        # Could not find hidden states
        logger.warning(
            f"Could not extract hidden states from args={type(args)} kwargs={list(kwargs.keys())}"
        )
        return None

    def _hook_forward(self, *args, **kwargs) -> torch.Tensor:
        """
        Apply prefix-tuning to transformer attention.

        Falls back to the unhooked forward method when the hidden states
        cannot be located or the prefix application raises.

        Args:
            *args: Positional arguments to the original forward method
            **kwargs: Keyword arguments to the original forward method

        Returns:
            torch.Tensor: Output with prefix-tuning applied

        Raises:
            RuntimeError: If the hook is inactive or was never attached.
        """
        if not self.is_active or self._original_forward is None:
            raise RuntimeError(
                "Hook not properly attached - no original forward method available"
            )

        try:
            # Get hidden states from arguments
            hidden_states = self._extract_hidden_states(args, kwargs)
            if hidden_states is None:
                logger.warning("No hidden states found in input, using original method")
                return self._original_forward(*args, **kwargs)

            # Get batch size for prefix expansion
            batch_size = hidden_states.size(0)

            # Get prefix vectors for this layer
            key_prefix, value_prefix = self.get_prefix_vectors(batch_size)

            # Pop keys we pass explicitly below. Previously, a caller-supplied
            # attention_mask (or hidden_states) kwarg collided with the
            # explicit keyword argument, raising TypeError on every call and
            # silently falling back to the original forward — i.e. the
            # prefix was never applied whenever a mask was given.
            passthrough = dict(kwargs)
            attention_mask = passthrough.pop("attention_mask", None)
            passthrough.pop("hidden_states", None)

            # Apply prefix using the appropriate attention mechanism
            return self.attention_mechanism.apply_prefix(
                hidden_states=hidden_states,
                key_prefix=key_prefix,
                value_prefix=value_prefix,
                attention_mask=attention_mask,
                **passthrough,
            )

        except Exception:
            # Deliberate best-effort: a broken prefix must not take down
            # inference, so log the full traceback and fall back.
            logger.exception("Error in prefix hook")
            logger.warning("Falling back to original forward method")
            return self._original_forward(*args, **kwargs)

    def get_parameters(self) -> List[nn.Parameter]:
        """Get all trainable parameters for the hook.

        Returns:
            List[nn.Parameter]: The prefix input tokens plus MLP weights when
                the MLP re-parameterization is enabled; otherwise the directly
                learned prefix tokens.
        """
        if self.use_prefix_mlp:
            return [self.prefix_input_tokens] + list(self.prefix_mlp.parameters())
        else:
            return [self.prefix_tokens]


@HOOK_FACTORY_REGISTRY.register("prefix")
class PrefixHookFactory(HookFactoryInterface):
    """
    Factory for creating prefix tuning hooks.

    Args:
        config: Prefix tuning configuration
        layer_indices: Mapping from module paths to layer indices
        **kwargs: Additional configuration parameters
    """

    def __init__(
        self,
        config: Optional[Union[Dict[str, Any], PrefixConfig]] = None,
        layer_indices: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        """Initialize prefix hook factory."""
        super().__init__(**kwargs)

        # Remember which module path maps to which transformer layer.
        self.layer_indices = layer_indices or {}

        # Deferred import: config_factory pulls in this module indirectly.
        from continuallearning.models.pefts.common.config_factory import (
            create_peft_config,
        )

        # Normalize whatever the caller handed us into a config object:
        # a dict is parsed, None falls back to the extra kwargs, and a
        # ready-made config instance is stored untouched.
        if isinstance(config, dict):
            self._config = create_peft_config("prefix", config)
        elif config is None:
            self._config = create_peft_config("prefix", kwargs)
        else:
            self._config = config

    def __call__(self, module_path: str, module: nn.Module) -> PrefixLayerHook:
        """
        Create a prefix hook for a specific module.

        Args:
            module_path: Path to the module in the model
            module: Module to hook

        Returns:
            PrefixLayerHook: Hook instance for this module
        """
        # Deferred import: config_factory pulls in this module indirectly.
        from continuallearning.models.pefts.common.config_factory import (
            create_peft_config,
        )

        # Build a per-layer config: the shared settings plus this module's
        # layer index (unknown paths default to layer 0).
        options = self._config.to_dict()
        options["layer_idx"] = self.layer_indices.get(module_path, 0)
        per_layer_config = create_peft_config("prefix", options)

        return PrefixLayerHook(module=module, config=per_layer_config)

    @property
    def config(self) -> PrefixConfig:
        """Get the prefix tuning configuration."""
        return self._config


@HOOK_MANAGER_REGISTRY.register("prefix")
class PrefixHookManager(UnifiedHookManager):
    """
    Manager for prefix hooks across a model.

    Args:
        model: Model to apply prefix-tuning to
        config: Configuration for hook creation and detection
        layer_indices: Mapping from module paths to layer indices
    """

    def __init__(
        self,
        model: nn.Module,
        config: Optional[Union[Dict[str, Any], PrefixConfig]] = None,
        layer_indices: Optional[Dict[str, int]] = None,
    ):
        """Initialize prefix hook manager."""
        # Deferred import: config_factory pulls in this module indirectly.
        from continuallearning.models.pefts.common.config_factory import (
            create_peft_config,
        )

        # Normalize the incoming config: dicts are parsed, None becomes an
        # all-defaults config, and config instances pass through unchanged.
        if isinstance(config, dict):
            prefix_config = create_peft_config("prefix", config)
        elif config is None:
            prefix_config = create_peft_config("prefix", {})
        else:
            prefix_config = config

        # The factory produces one PrefixLayerHook per detected module.
        hook_factory = PrefixHookFactory(
            config=prefix_config, layer_indices=layer_indices
        )

        # Name patterns used to locate attention modules in the common
        # transformer layouts (BERT-style, ViT blocks, GPT-style, etc.).
        detection = {
            "find_transformer_layers": True,
            "target_patterns": [
                "encoder.layer",
                "blocks",
                "transformer.h",
                "attention",
                "attn",
            ],
        }

        super().__init__(model, hook_factory, detection)

    def set_task(self, task_id: int) -> None:
        """
        Set the current task for all hooks.

        Args:
            task_id: Task identifier
        """
        # Only prefix hooks carry a task id; skip anything else the base
        # manager may have registered.
        for hook in self._hooks.values():
            if isinstance(hook, PrefixLayerHook):
                hook.current_task_id = task_id