"""
Prefix-tuning adapter implementation for parameter-efficient fine-tuning.

This module implements the prefix-tuning method as described in the paper
"Prefix-Tuning: Optimizing Continuous Prompts for Generation"
(Li & Liang, 2021).

Unlike standard prompt tuning which prepends tokens to the input sequence,
prefix-tuning directly modifies the key and value matrices in attention layers.
"""

from typing import Any, Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn

from continuallearning.interfaces.models.hook_interfaces import HookManagerInterface
from continuallearning.registry import HOOK_ADAPTER_REGISTRY
from continuallearning.utils.logging import get_logger

from continuallearning.models.backbones.base import BaseBackbone
from continuallearning.models.pefts.common.config import PrefixConfig
from continuallearning.models.pefts.common.exceptions import ConfigurationError
from continuallearning.models.pefts.common.config_factory import create_peft_config
from continuallearning.models.pefts.common.model_utils import detect_attention_heads
from continuallearning.models.pefts.prompt.prefix_utils import validate_prefix_config
from continuallearning.models.pefts.prompt.modules.base_prompt_adapter import (
    BasePromptAdapter,
)

# Create a logger via the project's custom logging system
logger = get_logger(__name__)


@HOOK_ADAPTER_REGISTRY.register("prefix_tuning")
class PrefixTuningAdapter(BasePromptAdapter):
    """
    Prefix-tuning adapter for parameter-efficient fine-tuning.

    This adapter implements prefix-tuning as described in the paper:
    "Prefix-Tuning: Optimizing Continuous Prompts for Generation" (Li & Liang, 2021).

    It adds learnable prefix vectors directly to the key and value matrices in
    self-attention layers, rather than adding tokens to the input sequence.
    This allows efficient adaptation of pre-trained models with just
    a small number of task-specific parameters.

    Args:
        backbone: Backbone model to apply prefix-tuning to
        num_tokens: Number of prefix tokens to add
        token_dim: Dimension of token embeddings (will auto-detect if 0)
        position: Position to add tokens ("prepend", "append", "deep")
        num_layers: Total number of model layers (will auto-detect if 0)
        target_layers: List of layer indices to modify (defaults to all layers)
        dropout: Dropout probability for regularization
        initialization: How to initialize prefixes ("random", "zero", "text")
        shared_across_layers: Whether to share prefix parameters across layers
        task_id: Optional task identifier for multi-task settings
        num_attention_heads: Number of attention heads (will auto-detect if 0)
        use_prefix_mlp: Whether to use MLP for generating prefix embeddings
        prefix_mlp_hidden_size: Hidden size for prefix generation MLP
        prefix_mlp_dropout: Dropout probability for prefix MLP
        prefix_mlp_activation: Activation function for prefix MLP
        prefix_mlp_layers: Number of layers in prefix MLP
        config: Optional PrefixConfig configuration object or dictionary
    """

    def __init__(
        self,
        backbone: BaseBackbone,
        num_tokens: int = 5,
        token_dim: int = 768,
        position: str = "prepend",
        num_layers: int = 12,
        target_layers: Optional[List[int]] = None,
        dropout: float = 0.0,
        initialization: str = "random",
        shared_across_layers: bool = False,
        task_id: Optional[int] = None,
        num_attention_heads: int = 12,
        use_prefix_mlp: bool = True,
        prefix_mlp_hidden_size: int = 512,
        prefix_mlp_dropout: float = 0.1,
        prefix_mlp_activation: str = "tanh",
        prefix_mlp_layers: int = 2,
        config: Optional[Union[Dict[str, Any], PrefixConfig]] = None,
    ):
        """
        Initialize the Prefix Tuning adapter.

        Supports two initialization styles:
        1. Passing all parameters directly.
        2. Passing a PrefixConfig object or dict via ``config``
           (can be mixed with direct parameters).

        Parameter precedence: direct parameters > configuration parameters >
        automatic detection > default values.

        Args:
            backbone: Backbone model to apply prefix-tuning to
            num_tokens: Number of prefix tokens to add
            token_dim: Dimension of token embeddings
            position: Position to add tokens ("prepend", "append", "deep")
            num_layers: Total number of model layers
            target_layers: List of layer indices to modify
            dropout: Dropout probability
            initialization: Initialization method ("random", "zero", "text")
            shared_across_layers: Whether to share parameters across layers
            task_id: Optional task ID
            num_attention_heads: Number of attention heads
            use_prefix_mlp: Whether to use MLP for prefix generation
            prefix_mlp_hidden_size: Hidden size for prefix generation MLP
            prefix_mlp_dropout: Dropout probability for prefix MLP
            prefix_mlp_activation: Activation function for prefix MLP
            prefix_mlp_layers: Number of layers in prefix MLP
            config: Optional PrefixConfig configuration object or dictionary

        Raises:
            ConfigurationError: If the configuration cannot be created or
                fails validation against the backbone.
        """
        try:
            # Build the configuration, preferring an explicit `config` object.
            if config is not None:
                if isinstance(config, dict):
                    self.config = create_peft_config("prefix", config)
                else:
                    self.config = config
            else:
                # Collect constructor arguments explicitly (replaces the
                # fragile locals() introspection, which breaks if any local
                # variable is introduced before this point). None values are
                # dropped so the config factory can apply its own defaults
                # and auto-detection.
                candidate_params: Dict[str, Any] = {
                    "num_tokens": num_tokens,
                    "token_dim": token_dim,
                    "position": position,
                    "num_layers": num_layers,
                    "target_layers": target_layers,
                    "dropout": dropout,
                    "initialization": initialization,
                    "shared_across_layers": shared_across_layers,
                    "task_id": task_id,
                    "num_attention_heads": num_attention_heads,
                    "use_prefix_mlp": use_prefix_mlp,
                    "prefix_mlp_hidden_size": prefix_mlp_hidden_size,
                    "prefix_mlp_dropout": prefix_mlp_dropout,
                    "prefix_mlp_activation": prefix_mlp_activation,
                    "prefix_mlp_layers": prefix_mlp_layers,
                }
                local_params = {
                    name: value
                    for name, value in candidate_params.items()
                    if value is not None
                }

                # Create configuration
                self.config = create_peft_config("prefix", local_params)

            # Validate (and possibly complete) the configuration against the
            # backbone.
            self.config = validate_prefix_config(self.config, backbone)

            # Log the effective configuration.
            logger.info(
                f"Prefix tuning adapter initialized with: "
                f"{self.config.num_tokens} tokens, "
                f"{self.config.token_dim} dimensions, "
                f"{len(self.config.target_layers)} target layers"
            )

        except ConfigurationError:
            # Configuration errors are already descriptive; re-raise as-is.
            raise
        except Exception as e:
            # Wrap any other failure, preserving the original traceback.
            raise ConfigurationError(
                f"Error initializing prefix tuning adapter: {str(e)}",
                method="prefix_tuning",
            ) from e

        # Initialize base class (presumably sets shared state such as
        # hidden_size / num_layers used below — provided by BasePromptAdapter).
        super().__init__(backbone=backbone, config=self.config)

        # Cache prefix-MLP settings from the validated configuration.
        self.use_prefix_mlp = self.config.use_prefix_mlp
        self.prefix_mlp_hidden_size = self.config.prefix_mlp_hidden_size
        self.prefix_mlp_dropout = self.config.prefix_mlp_dropout
        self.prefix_mlp_activation = self.config.prefix_mlp_activation
        self.prefix_mlp_layers = self.config.prefix_mlp_layers

        # Initialize logging
        logger.info("Initialized Prefix Tuning adapter")
        logger.info(f"Configuration: {self.config}")

        # Auto-detect the attention head count from the backbone.
        self.num_attention_heads = detect_attention_heads(backbone)
        logger.info(
            f"Automatically detected attention head count: {self.num_attention_heads}"
        )

        # Derived dimensions and regularization settings.
        self.head_dim = self.hidden_size // self.num_attention_heads
        self.prefix_length = self.config.num_tokens
        self.prefix_dropout = self.config.dropout

        # Task management parameters. The previous code read `self.num_tasks`
        # before this class ever assigned it, raising AttributeError unless the
        # base class happened to define it; keep a base-class value if present,
        # otherwise fall back to the configured task count.
        self.task_specific = self.config.task_specific
        if self.task_specific:
            self.num_tasks = getattr(self, "num_tasks", self.config.num_tasks)
        else:
            self.num_tasks = 1
        self.current_task_id = 0

        # Validate hidden_size is divisible by num_attention_heads
        if self.hidden_size % self.num_attention_heads != 0:
            logger.warning(
                f"hidden_size({self.hidden_size}) cannot be evenly divided by num_attention_heads({self.num_attention_heads})! "
                f"This may lead to incorrect attention mechanism calculations."
            )
            # Suggest the first common head count that divides hidden_size.
            for heads in [8, 12, 16, 32]:
                if self.hidden_size % heads == 0:
                    logger.info(
                        f"Suggestion: For hidden_size={self.hidden_size}, use num_attention_heads={heads}"
                    )
                    break

        # Deprecation warning - parameters now managed by hooks
        logger.warning(
            "Direct prefix parameter initialization is deprecated. "
            "Parameters are now managed by hooks in the new architecture."
        )

        # Hook management: task_id -> hook manager, plus the last model hooked.
        self._hook_managers = {}
        self._attached_model = None

    def adapt_representation(
        self,
        hidden_states: torch.Tensor,
        task_id: Optional[int] = None,
        layer_idx: Optional[int] = None,
        **kwargs,
    ) -> torch.Tensor:
        """
        Apply adaptation to model representations (hidden states).

        For prefix-tuning this is a pass-through: the adaptation happens
        inside the attention mechanism via hooks, not on the hidden states.

        Args:
            hidden_states: Input hidden states
            task_id: Optional task identifier
            layer_idx: Optional layer index
            **kwargs: Additional adapter-specific arguments

        Returns:
            torch.Tensor: Unmodified hidden states (pass-through)
        """
        # Switch tasks lazily, only when a different task is requested.
        needs_switch = task_id is not None and task_id != self.current_task
        if needs_switch:
            self.prepare_task(task_id)

        # Prefixes are injected into attention layers by the hook machinery,
        # so the hidden states flow through here untouched.
        return hidden_states

    def forward(self, x: torch.Tensor, task_id: Optional[int] = None, **kwargs) -> Any:
        """
        Forward pass with prefix-tuning adapter.

        Hooks are attached to the backbone for the duration of this call and
        always removed afterwards, even if the forward pass raises.

        Args:
            x: Input tensor
            task_id: Optional task identifier; switches the active task first
            **kwargs: Additional arguments for the backbone

        Returns:
            Any: Output from the backbone with prefix-tuning applied
        """
        # Set the current task if specified
        if task_id is not None:
            self.set_task(task_id)

        # Attach hooks to the underlying model (unwrap `.model` if present).
        target_model = getattr(self.backbone, "model", self.backbone)
        self.attach(target_model)

        try:
            # Run forward pass with hooks in place.
            # NOTE(review): a previous version also extracted a `features`
            # value here (dict lookup / tensor fallback) but never used it;
            # that dead code and its misleading warning were removed. The
            # backbone output is returned unchanged, as before.
            return self.backbone(x, **kwargs)
        finally:
            # Always clean up hooks, even on failure.
            self.detach()

    def attach(
        self, model: nn.Module, task_id: Optional[int] = None
    ) -> HookManagerInterface:
        """
        Attach this prefix adapter to a model using hooks.

        Args:
            model: Model to apply prefix-tuning to
            task_id: Optional task identifier

        Returns:
            HookManagerInterface: A hook manager instance
        """
        # Fall back to the adapter's current task when none is given.
        resolved_task = self.current_task if task_id is None else task_id

        # Layer-pattern -> index mapping; currently left empty (presumably
        # resolved by the hook manager itself — TODO confirm).
        pattern_to_index: Dict[str, int] = {}

        # Snapshot this adapter's settings into a config for the hook manager.
        hook_config = PrefixConfig(
            num_tokens=self.prefix_length,
            token_dim=self.hidden_size,
            num_layers=self.num_layers,
            num_attention_heads=self.num_attention_heads,
            use_prefix_mlp=self.use_prefix_mlp,
            prefix_mlp_hidden_size=self.prefix_mlp_hidden_size,
            dropout=self.prefix_dropout,
            target_layers=self.config.target_layers,
            task_specific=self.task_specific,
            num_tasks=self.num_tasks,
        )

        # Deferred import to avoid a circular dependency at module load time.
        from continuallearning.registry import HOOK_MANAGER_REGISTRY

        manager_cls = HOOK_MANAGER_REGISTRY.get("prefix")

        # Build the hook manager; construction attaches the hooks.
        manager = manager_cls(
            model=model, config=hook_config, layer_indices=pattern_to_index
        )

        # Propagate the active task when the manager supports it.
        if hasattr(manager, "set_task"):
            manager.set_task(resolved_task)

        # Book-keeping so detach()/get_trainable_parameters() can find it.
        self._hook_managers[resolved_task] = manager
        self._attached_model = model

        return manager

    def detach(self) -> None:
        """
        Detach every registered hook and clear the attached-model reference.
        """
        # Flatten all hooks across all stored managers, then detach each one.
        all_hooks = (
            hook
            for manager in self._hook_managers.values()
            for hook in manager.get_hooks().values()
        )
        for hook in all_hooks:
            hook.detach()
        self._attached_model = None

    def get_trainable_parameters(
        self, task_id: Optional[int] = None
    ) -> List[nn.Parameter]:
        """
        Get trainable parameters for this adapter.

        Args:
            task_id: Optional task identifier

        Returns:
            List[nn.Parameter]: List of trainable parameters
        """
        # Switch to the requested task first, if one was given.
        if task_id is not None:
            self.prepare_task(task_id)

        # Without an attached hook manager there is nothing to train yet.
        manager = self._hook_managers.get(self.current_task)
        if manager is None:
            logger.warning(
                "No hook manager available, returning empty parameter list. "
                "Make sure to attach hooks before training."
            )
            return []

        # Delegate to the active hook manager, which owns the parameters.
        return manager.get_trainable_parameters()
