"""
Learning to Prompt for Continual Learning (L2P) implementation.

This module implements the L2P method from:
"Learning To Prompt for Continual Learning" (Wang et al., CVPR 2022)
"""

from typing import List, Optional, Dict, Any, Union, cast

import torch
import torch.nn as nn

from continuallearning.interfaces.types import AdapterOutput
from continuallearning.registry import HOOK_ADAPTER_REGISTRY
from continuallearning.interfaces.models.hook_interfaces import HookManagerInterface
from continuallearning.models.pefts.prompt.modules.base_prompt_adapter import (
    BasePromptAdapter,
)
from continuallearning.models.backbones.base import BaseBackbone
from continuallearning.models.pefts.common.config import L2PConfig, TokenBasedPEFTConfig
from continuallearning.models.pefts.common.config_factory import create_peft_config
from continuallearning.models.pefts.common.exceptions import ConfigurationError

# Create a logger via the project's custom logging system
from continuallearning.utils.logging import get_logger

logger = get_logger(__name__)


@HOOK_ADAPTER_REGISTRY.register()
class L2PAdapter(BasePromptAdapter):
    """
    Learning to Prompt (L2P) adapter implementation.

    L2P introduces a prompt pool and a prompt selection mechanism based on key-based
    attention for continual learning.

    Args:
        backbone: Backbone model to adapt
        config: Configuration object or dictionary
        **kwargs: Configuration parameters (used when config is None)
    """

    def __init__(
        self,
        backbone: BaseBackbone,
        config: Optional[Union[Dict[str, Any], TokenBasedPEFTConfig]] = None,
        **kwargs,
    ):
        """
        Build a Learning to Prompt (L2P) adapter.

        Two configuration paths are supported:
        1. A configuration object or dictionary passed via ``config``.
        2. Individual settings passed through ``**kwargs`` (when ``config`` is None).

        Args:
            backbone: Backbone model to adapt
            config: Optional L2PConfig or dictionary
            **kwargs: Additional configuration parameters (used when config is None)
        """
        # The base class performs the minimal shared setup; L2P-specific
        # configuration is layered on top afterwards.
        super().__init__(backbone=backbone, config=config, **kwargs)

        try:
            # Read L2P-specific settings (pool_size, top_k, ...)
            self._setup_l2p_from_config()
            # Legacy no-op: real prompt storage now lives in the layer hooks
            self._initialize_prompts()
            logger.info(f"Initialized L2P adapter with pool size {self.pool_size}")
            logger.debug(f"Configuration: {self.config}")
        except Exception as e:
            # Wrap any setup failure in a descriptive configuration error
            raise ConfigurationError(
                f"Failed to initialize L2P configuration: {str(e)}", method="l2p"
            ) from e

        # Holds features captured for prompt selection during forward()
        self._cached_features = None

    def _create_config(
        self,
        config: Optional[Union[Dict[str, Any], TokenBasedPEFTConfig]] = None,
        **kwargs,
    ) -> L2PConfig:
        """
        Build the L2P configuration through the factory.

        Args:
            config: Configuration object or dictionary
            **kwargs: Configuration parameters (used when config is None)

        Returns:
            L2PConfig: The processed configuration
        """
        # No explicit config: let the factory build one from keyword arguments.
        if config is None:
            return create_peft_config("l2p", kwargs)

        # A plain dict goes straight to the factory.
        if isinstance(config, dict):
            return create_peft_config("l2p", config)

        # Any other config object is converted into an L2PConfig if needed.
        if not isinstance(config, L2PConfig):
            config = create_peft_config("l2p", config.to_dict())
        return cast(L2PConfig, config)

    def _setup_l2p_from_config(self) -> None:
        """
        Read L2P-specific parameters from the configuration.

        Delegates to the parent first so the shared prompt settings are in
        place, then extracts the pool parameters. The attributes are kept on
        the adapter for backward compatibility with the pre-hook design.
        """
        super()._setup_from_config()

        cfg = self.config.to_dict()
        self.pool_size = cfg.get("pool_size", 10)
        self.top_k = cfg.get("top_k", 5)

        # Selecting more prompts than the pool holds makes no sense; clamp.
        if self.top_k > self.pool_size:
            logger.warning(
                f"top_k ({self.top_k}) is larger than pool_size ({self.pool_size}). "
                f"Setting top_k to pool_size."
            )
            self.top_k = self.pool_size

    def _initialize_prompts(self) -> None:
        """
        Deprecated placeholder for prompt pool / key initialization.

        In the hook-based implementation prompt parameters are created inside
        PromptPoolLayerHook; this method only keeps the legacy attribute names
        alive so older callers do not break.
        """
        logger.warning(
            "L2PAdapter._initialize_prompts is deprecated. "
            "Prompts are now initialized in PromptPoolLayerHook."
        )

        # Legacy attributes, intentionally left unset.
        self.prompt_pool = None
        self.prompt_keys = None

    @property
    def l2p_config(self) -> L2PConfig:
        """
        Backward-compatible accessor for the adapter configuration.

        Returns:
            L2PConfig: The current configuration
        """
        return cast(L2PConfig, self.config)

    @property
    def prompt_length(self) -> int:
        """Alias of ``num_prompts``, kept for backward compatibility."""
        return self.num_prompts

    def get_prompt_params(self) -> List[nn.Parameter]:
        """
        Return the trainable prompt parameters.

        Prompt parameters are owned by the layer hooks in the hook-based
        design, so this looks them up via the hook manager registered for the
        current task (kept for backward compatibility).

        Returns:
            List[nn.Parameter]: List of trainable prompt parameters
        """
        # Delegate to the hook manager for the active task, when one exists.
        managers = getattr(self, "_hook_managers", None)
        if managers and self.current_task in managers:
            return managers[self.current_task].get_trainable_parameters()

        # No hooks attached yet: nothing trainable to hand back.
        logger.warning(
            "No hook manager available, returning empty parameter list. "
            "Make sure to attach hooks before training."
        )
        return []

    def adapt_representation(
        self,
        hidden_states: torch.Tensor,
        task_id: Optional[int] = None,
        layer_idx: Optional[int] = None,
        **kwargs,
    ) -> torch.Tensor:
        """
        Pass hidden states through unchanged, switching tasks when requested.

        Under the hook-based architecture (mirroring the LoRA implementation),
        prompt selection and injection happen in PromptPoolLayerHook, so this
        method only ensures the right task is active before returning the
        states untouched.

        Args:
            hidden_states: Input hidden states [batch_size, seq_len, hidden_dim]
            task_id: Optional task identifier (used to prepare the correct task)
            layer_idx: Layer index (unused in this implementation)
            **kwargs: Additional arguments (unused in this implementation)

        Returns:
            torch.Tensor: Original hidden states (hooks handle prompting)
        """
        # Only switch when an explicit, different task id is supplied.
        needs_switch = task_id is not None and task_id != self.current_task
        if needs_switch:
            self.prepare_task(task_id)

        # Prompts are applied by the per-layer hooks, not here.
        return hidden_states

    def forward(
        self, x: torch.Tensor, task_id: Optional[int] = None, **kwargs
    ) -> AdapterOutput:
        """
        Forward pass with L2P adapter.

        First runs a no-grad pass to cache features used for prompt selection,
        then attaches the prompt-injection hooks and performs the real forward
        pass through the backbone.

        Args:
            x: Input tensor
            task_id: Optional task identifier
            **kwargs: Additional arguments for the backbone

        Returns:
            AdapterOutput: Adapter output with features from prompted model
        """
        # Activate the requested task, if any
        if task_id is not None:
            self.set_task(task_id)

        # Feature pass for prompt selection; no gradients are needed here.
        with torch.no_grad():
            if hasattr(self.backbone, "extract_features"):
                # Dedicated feature extractor is cheaper than a full forward
                self._cached_features = self.backbone.extract_features(x, **kwargs)
            else:
                # Fall back to running the full model
                try:
                    outputs = self.backbone(x, **kwargs)
                    if isinstance(outputs, dict) and "features" in outputs:
                        self._cached_features = outputs["features"]
                    else:
                        # Generic fallback: mean-pool over the sequence dim
                        self._cached_features = x.mean(dim=1)
                        logger.warning("Using mean pooling as feature representation.")
                except Exception as e:
                    # logger.exception records the traceback, aiding debugging
                    logger.exception("Error extracting features for L2P: %s", e)
                    self._cached_features = x.mean(dim=1)  # Fallback

        # Attach prompt hooks (to the inner model when the backbone wraps one)
        target = self.backbone.model if hasattr(self.backbone, "model") else self.backbone
        self.attach(target)

        try:
            # Run forward pass with hooks in place
            output = self.backbone(x, **kwargs)

            # Normalize the backbone output into a plain feature tensor
            if isinstance(output, dict) and "features" in output:
                features = output["features"]
            elif isinstance(output, torch.Tensor):
                features = output
            else:
                # Use input tensor as fallback
                logger.warning(
                    "Could not extract features from output, using input tensor"
                )
                features = x

            return AdapterOutput(
                features=features,
                adapter_hidden_states=None,
            )
        except Exception as e:
            logger.exception("Error in L2P forward pass: %s", e)
            # Best-effort fallback: return the cached selection features.
            # (This return value is built before the finally block clears the cache.)
            return AdapterOutput(
                features=self._cached_features,
                adapter_hidden_states=None,
            )
        finally:
            # Always clean up hooks and drop the feature cache
            # (the redundant `del` before rebinding was removed)
            self.detach()
            self._cached_features = None

    def attach(
        self, model: nn.Module, task_id: Optional[int] = None
    ) -> HookManagerInterface:
        """
        Create and attach the L2P prompt hooks for a task.

        Builds an L2pHookFactory / L2pHookManager pair for the requested task,
        registers the manager under that task id, and activates its hooks.

        Args:
            model: Model to attach hooks to
            task_id: Optional task identifier

        Returns:
            HookManagerInterface: The created hook manager
        """
        # Local import breaks a circular dependency with the hooks module.
        from continuallearning.models.pefts.prompt.hooks.l2p_hooks import (
            L2pHookManager,
            L2pHookFactory,
        )

        # Default to the currently active task when none is given.
        active_task = self.current_task if task_id is None else task_id

        # Factory carries the task id so hooks know which prompts to serve.
        hook_factory = L2pHookFactory(
            config=self.config,
            task_id=active_task,
        )

        manager = L2pHookManager(
            model=model,
            factory=hook_factory,
            config=self.config,
        )

        # Keep a per-task handle so parameters can be fetched later.
        self._hook_managers[active_task] = manager

        manager.attach()
        return manager
