"""
Base implementation for prompt-based parameter-efficient fine-tuning methods.

This module provides a foundation for implementing different prompt-based methods
for continual learning, following a consistent interface.
"""

from typing import List, Optional, Union, Dict, Any

import torch
import torch.nn as nn

from continuallearning.interfaces.models.adapter import AdapterType
from continuallearning.interfaces.models.prompt_interfaces import (
    PromptAdapterInterface,
    PromptPosition,
)
from continuallearning.interfaces.models.hook_interfaces import HookManagerInterface
from continuallearning.interfaces.types import AdapterOutput
from continuallearning.models.backbones.base import BaseBackbone
from continuallearning.models.pefts.hooks._base_hook import BaseHookAdapter
from continuallearning.models.pefts.common.config import TokenBasedPEFTConfig
from continuallearning.models.pefts.common.config_factory import create_peft_config
from continuallearning.models.pefts.common.exceptions import ConfigurationError

# Use custom logging system
from continuallearning.utils.logging import get_logger

logger = get_logger(__name__)


class BasePromptAdapter(BaseHookAdapter, PromptAdapterInterface):
    """
    Base class for prompt-based adapters.

    Provides the common plumbing shared by all prompt-based PEFT methods:
    unified configuration creation and validation, per-task hook-manager
    bookkeeping, and trainable-parameter discovery. Concrete prompt methods
    subclass this and implement ``adapt_representation``.

    Args:
        backbone: Backbone model to adapt
        config: Configuration object or dictionary
        **kwargs: Additional configuration parameters (used when config is None)
    """

    def __init__(
        self,
        backbone: BaseBackbone,
        config: Optional[Union[Dict[str, Any], TokenBasedPEFTConfig]] = None,
        **kwargs,
    ) -> None:
        """
        Initialize the base prompt adapter.

        Supports two initialization methods:
        1. Providing a configuration object or dictionary
        2. Providing individual parameters as kwargs

        Priority: direct parameters > config parameters > auto-detection > defaults

        Args:
            backbone: Backbone model to adapt
            config: TokenBasedPEFTConfig or dictionary
            **kwargs: Additional configuration parameters (used when config is None)

        Raises:
            ConfigurationError: If configuration creation or validation fails.
        """
        # Initialize with a default adapter type; the real type (when present
        # in the config) is applied later in _setup_from_config().
        super().__init__(backbone=backbone, adapter_type=AdapterType.POST_BACKBONE)

        # Per-task hook managers. Initialized *before* config processing so
        # accessors such as get_prompt_params() remain safe even if
        # configuration fails partway through __init__.
        self._hook_managers: Dict[int, HookManagerInterface] = {}

        try:
            # Create configuration using the unified factory pattern.
            self.config = self._create_config(config, **kwargs)

            # Validate the configuration, then extract adapter parameters.
            self._setup_from_config()

            logger.info(f"Initialized {self.__class__.__name__} adapter")
            logger.debug(f"Configuration: {self.config}")

        except Exception as e:
            # Surface any failure as a uniform, informative error.
            raise ConfigurationError(
                f"Failed to initialize prompt configuration: {str(e)}", method="prompt"
            ) from e

    def _create_config(
        self,
        config: Optional[Union[Dict[str, Any], TokenBasedPEFTConfig]] = None,
        **kwargs,
    ) -> TokenBasedPEFTConfig:
        """
        Create the configuration object using the factory pattern.

        Args:
            config: Configuration object or dictionary
            **kwargs: Configuration parameters (used when config is None)

        Returns:
            TokenBasedPEFTConfig: The processed configuration
        """
        if config is None:
            # No explicit config supplied: build one from keyword arguments.
            return create_peft_config("prompt", kwargs)
        if isinstance(config, dict):
            return create_peft_config("prompt", config)
        # A ready-made config object is accepted as-is; it is checked later
        # by _validate_config().
        return config

    def _setup_from_config(self) -> None:
        """
        Set up adapter parameters from configuration.

        Validates the configuration and then extracts the values the adapter
        needs (token dimension, number of layers/tokens, prompt position).

        Raises:
            ConfigurationError: If the configuration is invalid.
        """
        # Validate *before* extraction so that a missing parameter surfaces
        # as a clear ConfigurationError rather than an AttributeError from
        # the attribute accesses below.
        self._validate_config()

        # Adapter type may be overridden by the config if specified.
        if hasattr(self.config, "adapter_type"):
            self._adapter_type = self.config.adapter_type

        # Extract necessary dimensions from config.
        self._hidden_size: int = self.config.token_dim
        self._num_layers: int = self.config.num_layers
        self._num_prompts: int = self.config.num_tokens
        self._prompt_position = PromptPosition(self.config.position)

    def _validate_config(self) -> None:
        """
        Validate the configuration.

        This checks that the configuration has all required values
        and that they are valid for this adapter type.

        Raises:
            ConfigurationError: If the configuration is invalid
        """
        # num_layers is required because _setup_from_config reads it
        # unconditionally alongside the other parameters.
        required_attrs = ["token_dim", "num_tokens", "num_layers", "position"]
        for attr in required_attrs:
            if not hasattr(self.config, attr):
                raise ConfigurationError(
                    f"Missing required configuration parameter: {attr}", method="prompt"
                )

        # Check that prompt position is valid before PromptPosition() is
        # constructed from it.
        valid_positions = ["prepend", "append", "deep"]
        if self.config.position not in valid_positions:
            raise ConfigurationError(
                f"Invalid prompt position: {self.config.position}. "
                f"Must be one of: {', '.join(valid_positions)}",
                method="prompt",
            )

    @property
    def prompt_position(self) -> PromptPosition:
        """Get the position where prompts are applied."""
        return self._prompt_position

    @property
    def num_prompts(self) -> int:
        """Get the number of prompt tokens."""
        return self._num_prompts

    @property
    def prompt_dim(self) -> int:
        """Get the dimensionality of prompt embeddings."""
        return self._hidden_size

    def adapt_representation(
        self,
        hidden_states: torch.Tensor,
        task_id: Optional[int] = None,
        layer_idx: Optional[int] = None,
        **kwargs,
    ) -> torch.Tensor:
        """
        Apply adaptation to model representations (hidden states).

        For prompt-based adapters, this applies prompt tokens to the input
        hidden states based on the specific prompt method implementation.

        Args:
            hidden_states: Input hidden states to adapt
            task_id: Optional task identifier (defaults to current task)
            layer_idx: Optional layer index for layer-specific adaptations
            **kwargs: Additional adapter-specific arguments

        Returns:
            torch.Tensor: Adapted hidden states with prompts applied

        Raises:
            NotImplementedError: Always, in this base class; subclasses must
                provide the actual prompt application.
        """
        # Resolve the task the same way subclasses are expected to.
        if task_id is None:
            task_id = self.current_task

        raise NotImplementedError(
            "Subclasses must implement adapt_representation method"
        )

    def attach(
        self, model: nn.Module, task_id: Optional[int] = None
    ) -> HookManagerInterface:
        """
        Attach adapter hooks to model.

        This method overrides the BaseHookAdapter's implementation to add
        prompt-specific behaviors while leveraging the base functionality.

        Args:
            model: Model to attach hooks to
            task_id: Optional task identifier (defaults to current task)

        Returns:
            HookManagerInterface: The created hook manager
        """
        # Resolve the task this attachment belongs to.
        task_id = self.current_task if task_id is None else task_id

        # Delegate the actual hook attachment to BaseHookAdapter.
        # NOTE(review): super().attach() is not given task_id — confirm that
        # the base implementation resolves the task via current_task.
        hook_manager = super().attach(model)

        # Track the manager per task so it can be retrieved later
        # (e.g. by get_prompt_params).
        self._hook_managers[task_id] = hook_manager

        return hook_manager

    @staticmethod
    def _collect_hook_params(
        hook_manager: HookManagerInterface,
    ) -> List[nn.Parameter]:
        """
        Collect parameters exposed by a hook manager's individual hooks.

        Parameters are read from each hook directly (via ``get_parameters``)
        rather than through ``hook_manager.get_trainable_parameters()``,
        which might call back into the adapter and create a circular
        reference.

        Args:
            hook_manager: Manager whose hooks are inspected.

        Returns:
            List[nn.Parameter]: All parameters reported by the hooks.
        """
        params: List[nn.Parameter] = []
        for hook in hook_manager.hooks.values():
            if hasattr(hook, "get_parameters"):
                hook_params = hook.get_parameters()
                if hook_params:
                    params.extend(hook_params)
        return params

    def get_prompt_params(self) -> List[nn.Parameter]:
        """
        Get trainable prompt parameters.

        In the hook-based architecture, parameters are managed by the hooks,
        not by the adapter directly. This method attempts to fetch parameters
        from the current task's hook manager, with a fallback to the
        adapter's own parameters.

        Returns:
            List[nn.Parameter]: List of prompt parameters
        """
        hook_manager: Optional[HookManagerInterface] = None
        if hasattr(self, "_hook_managers") and self._hook_managers:
            hook_manager = self._hook_managers.get(self.current_task)

        if hook_manager is not None:
            if (
                hook_manager.is_attached
                and hasattr(hook_manager, "hooks")
                and hook_manager.hooks
            ):
                # Hooks exist and are attached: read parameters straight
                # from them (safe, no re-entrant manager call).
                all_params = self._collect_hook_params(hook_manager)
                if all_params:
                    logger.debug(f"Found {len(all_params)} parameters from hooks")
                    return all_params
            elif (
                not hook_manager.is_attached
                and getattr(self, "backbone", None) is not None
            ):
                # Hooks not attached but a backbone exists: attach
                # temporarily (context manager guarantees cleanup).
                with hook_manager:
                    all_params = self._collect_hook_params(hook_manager)
                    if all_params:
                        logger.debug(
                            f"Found {len(all_params)} parameters from temporarily attached hooks"
                        )
                        return all_params

        # Fallback: the adapter's own parameters (deprecated path in the
        # hook-based architecture).
        adapter_params = list(self.parameters())
        if adapter_params:
            logger.debug(
                f"Returning {len(adapter_params)} parameters directly from adapter"
            )
            if hasattr(self, "_hook_managers") and self._hook_managers:
                logger.warning(
                    "Using adapter parameters directly instead of hook parameters. "
                    "This approach is deprecated in the hook-based architecture."
                )
            return adapter_params

        # Nothing found anywhere.
        logger.warning(
            "No hook manager available and no adapter parameters found. "
            "Make sure to attach hooks before training."
        )
        return []

    def get_trainable_parameters(
        self, task_id: Optional[int] = None
    ) -> List[nn.Parameter]:
        """
        Get trainable parameters for this adapter.

        This method leverages the get_prompt_params method,
        ensuring hook-based parameter fetching with a fallback.

        Args:
            task_id: Optional task identifier; when given, the adapter is
                switched to that task first via prepare_task.

        Returns:
            List[nn.Parameter]: List of trainable parameters
        """
        if task_id is not None:
            self.prepare_task(task_id)

        return self.get_prompt_params()

    def forward(
        self, x: torch.Tensor, task_id: Optional[int] = None, **kwargs
    ) -> AdapterOutput:
        """
        Forward pass through the prompt adapter.

        Uses the Hook mechanism to apply prompts during the backbone forward
        pass, delegating to BaseHookAdapter to avoid code duplication.

        Args:
            x: Input tensor
            task_id: Optional task identifier
            **kwargs: Additional arguments for specific prompt methods

        Returns:
            AdapterOutput: Adapter output containing modified features
        """
        # Switch to the requested task before delegating.
        if task_id is not None:
            self.prepare_task(task_id)

        return super().forward(x, task_id, **kwargs)

    def _create_hook_manager(
        self, model: nn.Module, task_id: int
    ) -> HookManagerInterface:
        """
        Create a hook manager for a specific task.

        This method implements the abstract method from BaseHookAdapter,
        creating the appropriate PromptHookManager for prompt-based adapters.

        Args:
            model: Model to apply hooks to
            task_id: Task identifier

        Returns:
            HookManagerInterface: Hook manager for the adapter
        """
        # Import here to avoid circular imports.
        from continuallearning.models.pefts.prompt.hooks.prompt_hooks import (
            PromptHookManager,
            PromptHookFactory,
        )

        # Factory carries the task id and config for hook construction.
        factory = PromptHookFactory(
            task_id=task_id,
            config=self.config,
        )

        # The adapter is still passed so the manager can read target module
        # patterns from it.
        hook_manager = PromptHookManager(
            model=model,
            adapter=self,
            factory=factory,
        )

        return hook_manager
