"""
Prompt-specific hooks for parameter-efficient fine-tuning.

This module provides a hook system specifically designed for prompt-based methods,
allowing for insertion of learnable prompt tokens into transformer hidden states.
"""

from typing import Dict, List, Optional, Any, Union

import torch
import torch.nn as nn

from continuallearning.interfaces import (
    HookFactoryInterface,
    PromptAdapterInterface,
    PromptPosition,
)
from continuallearning.models.pefts.common.config import (
    L2PConfig,
    PromptConfig,
)
from continuallearning.models.pefts.hook_manager.hook_manager import UnifiedHookManager
from continuallearning.models.pefts.hooks._base_hook import (
    BaseHook,
    HookType,
)
from continuallearning.models.pefts.hooks.l2p_hooks import (
    L2pLayerHook,
)
from continuallearning.registry import HOOK_FACTORY_REGISTRY, HOOK_MANAGER_REGISTRY
from continuallearning.utils.logging import get_logger

logger = get_logger(__name__)


class PromptLayerHook(BaseHook):
    """
    Base hook that injects learnable prompt tokens into a transformer layer.

    The layer's forward method is replaced so that prompt embeddings owned by
    this hook are concatenated onto the incoming hidden states before the
    original forward runs. The hook holds its own parameters directly,
    following the pure hook-based design used across this package.

    Args:
        module: Transformer layer to hook
        config: Prompt configuration
    """

    def __init__(
        self,
        module: nn.Module,
        config: PromptConfig,
    ):
        # FORWARD_REPLACE lets us rewrite the hidden states before the
        # layer ever processes them.
        super().__init__(module=module, hook_type=HookType.FORWARD_REPLACE)
        self.config = config

        # Resolve positioning and bookkeeping fields up front.
        self._prompt_position = self._get_prompt_position(config.prompt_position)
        self._layer_idx = getattr(config, "layer_idx", 0)
        # Task identity is tracked directly from the config.
        self._task_id = getattr(config, "task_id", 0)

        # Prompt parameter shape and regularization.
        self.num_tokens = config.num_tokens
        self.token_dim = config.token_dim
        self.prompt_dropout = nn.Dropout(config.dropout) if config.dropout > 0 else None

        # The hook owns its prompt parameters directly.
        self.prompt_embeddings = self._initialize_prompt_embeddings(
            config.initialization
        )

    def _initialize_prompt_embeddings(self, init_method: str) -> nn.Parameter:
        """Build the prompt embedding parameter for the given init scheme."""
        shape = (self.num_tokens, self.token_dim)
        if init_method == "zero":
            tensor = torch.zeros(*shape)
        else:
            if init_method != "random":
                # Anything unrecognized falls back to random with a warning.
                logger.warning(
                    f"Unknown prompt initialization method: {init_method}. Using random."
                )
            # Small-scale gaussian init (std ~= 0.02).
            tensor = torch.randn(*shape) * 0.02

        return nn.Parameter(tensor)

    def _get_prompt_position(self, position_str: str) -> PromptPosition:
        """Translate a position string into a PromptPosition enum value."""
        mapping = {
            "prepend": PromptPosition.PREPEND,
            "append": PromptPosition.APPEND,
            "deep": PromptPosition.DEEP,
        }
        # Unknown strings silently default to PREPEND.
        return mapping.get(position_str, PromptPosition.PREPEND)

    def _hook_forward(
        self, hidden_states: torch.Tensor, *args, **kwargs
    ) -> torch.Tensor:
        """
        Inject prompts into the hidden states, then call the original forward.

        Args:
            hidden_states: Input hidden states tensor
            *args, **kwargs: Additional arguments for the original layer

        Returns:
            torch.Tensor: Transformed output tensor

        Raises:
            RuntimeError: If the hook is not properly attached
        """
        if not (self.is_active and self._original_forward is not None):
            raise RuntimeError(
                "Hook not properly attached - no original forward method available"
            )

        try:
            prompted = self._apply_prompts(hidden_states)
            return self._original_forward(prompted, *args, **kwargs)
        except Exception as e:
            logger.error(f"Error in prompt hook forward: {str(e)}")
            # Best-effort recovery: run the layer without prompts.
            logger.warning("Falling back to original forward method")
            return self._original_forward(hidden_states, *args, **kwargs)

    def _apply_prompts(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """
        Concatenate prompt tokens onto the hidden states.

        Args:
            hidden_states: Input hidden states [batch_size, seq_len, hidden_dim]

        Returns:
            torch.Tensor: Hidden states with prompts applied
        """
        device = hidden_states.device
        batch_size = hidden_states.size(0)

        # Keep prompts on the same device as the activations.
        embeds = self.prompt_embeddings.to(device)

        if self.prompt_dropout is not None:
            embeds = self.prompt_dropout(embeds)

        # [num_tokens, dim] -> [batch_size, num_tokens, dim]
        tiled = embeds.unsqueeze(0).expand(batch_size, -1, -1)

        if self._prompt_position == PromptPosition.PREPEND:
            # Prompts go in front of the sequence.
            return torch.cat([tiled, hidden_states], dim=1)
        if self._prompt_position == PromptPosition.APPEND:
            # Prompts go after the sequence.
            return torch.cat([hidden_states, tiled], dim=1)

        # DEEP (and any other value) leaves the sequence untouched here.
        logger.warning(
            f"Prompt position {self._prompt_position} not supported in PromptLayerHook"
        )
        return hidden_states

    def get_parameters(self) -> List[nn.Parameter]:
        """
        Get trainable parameters for this hook.

        Returns:
            List[nn.Parameter]: List of trainable prompt parameters
        """
        return [self.prompt_embeddings]


class FixedPromptLayerHook(PromptLayerHook):
    """
    Hook for fixed prompt tuning.

    Behaviorally identical to PromptLayerHook today: it exists as a distinct
    type so fixed-prompt behavior can be specialized later (and so factories
    and registries can distinguish it) without touching the base class.

    The constructor is inherited from PromptLayerHook (``module``, ``config``);
    the previous ``__init__`` only delegated to ``super().__init__`` with the
    identical signature and was removed as redundant boilerplate.
    """


@HOOK_FACTORY_REGISTRY.register("prompt")
class PromptHookFactory(HookFactoryInterface):
    """
    Factory for creating Prompt hooks.

    This follows the Factory pattern for creating hooks with specific
    configurations. Each call produces a hook configured for one layer; hooks
    own their parameters and don't depend on external components.

    Args:
        config: Prompt configuration, dict, or None (built from ``kwargs``)
        layer_indices: Mapping from module paths to layer indices
        task_id: Current task identifier (optional)
        **kwargs: Additional configuration parameters (also forwarded to the
            base class constructor)
    """

    def __init__(
        self,
        config: Optional[Union[Dict[str, Any], PromptConfig, L2PConfig]] = None,
        layer_indices: Optional[Dict[str, int]] = None,
        task_id: int = 0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # NOTE: a caller-supplied ``layer_indices`` always binds to the named
        # parameter, so ``kwargs`` can never contain it — the previous
        # ``kwargs.get("layer_indices", {})`` fallback was dead code.
        self.layer_indices = layer_indices or {}

        # Store task ID directly
        self._task_id = task_id

        # Imported locally — presumably to avoid a circular import at module
        # load time; confirm before hoisting to module level.
        from continuallearning.models.pefts.common.config_factory import (
            create_peft_config,
        )

        if config is None:
            self._config = create_peft_config("prompt", kwargs)
        elif isinstance(config, dict):
            self._config = create_peft_config("prompt", config)
        else:
            self._config = config

    def __call__(self, module_path: str, module: nn.Module) -> BaseHook:
        """
        Create a Prompt hook for a specific module.

        Args:
            module_path: Path to the module in the model
            module: Module to hook

        Returns:
            BaseHook: Hook instance for this module — an ``L2pLayerHook`` when
            the resolved config is an ``L2PConfig``, otherwise a
            ``PromptLayerHook``.
        """
        # Layers not present in the mapping default to index 0.
        layer_idx = self.layer_indices.get(module_path, 0)

        # Rebuild the config so this specific layer sees its own index and
        # the current task id.
        config_dict = self.prompt_config.to_dict()
        config_dict.update(
            {
                "layer_idx": layer_idx,
                "task_id": self._task_id,
            }
        )

        from continuallearning.models.pefts.common.config_factory import (
            create_peft_config,
        )

        layer_config = create_peft_config("prompt", config_dict)

        # Choose the hook class from the resolved config type.
        hook: BaseHook
        if isinstance(layer_config, L2PConfig):
            hook = L2pLayerHook(module=module, config=layer_config)
        else:
            hook = PromptLayerHook(module=module, config=layer_config)

        return hook

    @property
    def prompt_config(self) -> Union[PromptConfig, L2PConfig]:
        """Get the prompt configuration"""
        return self._config

    @property
    def config(self) -> Union[PromptConfig, L2PConfig]:
        """Alias for :attr:`prompt_config`.

        Annotation corrected: the stored config may also be an ``L2PConfig``,
        not only a ``PromptConfig``.
        """
        return self.prompt_config


@HOOK_MANAGER_REGISTRY.register("prompt")
class PromptHookManager(UnifiedHookManager):
    """
    Manager for prompt hooks across a model.

    Locates transformer layers and attaches/detaches prompt hooks to them,
    building on the unified hook manager implementation.

    Args:
        model: Model to apply prompts to
        adapter: Prompt adapter providing prompt functionality
        factory: Hook factory to create hooks (optional)
        layer_indices: Mapping from module paths to layer indices (optional)
        config: Configuration for hook creation and module detection (optional)
    """

    def __init__(
        self,
        model: nn.Module,
        adapter: PromptAdapterInterface,
        factory: Optional[HookFactoryInterface] = None,
        layer_indices: Optional[Dict[str, int]] = None,
        config: Optional[Union[Dict[str, Any], PromptConfig]] = None,
    ):
        # When the caller did not supply a factory, build a default one from
        # the adapter and raw configuration.
        if factory is None:
            factory = self._build_default_factory(adapter, layer_indices, config)

        # Module detection: find transformer layers, using the adapter's
        # target patterns when it provides any.
        detection_config = {
            "find_transformer_layers": True,
            "target_patterns": getattr(
                adapter,
                "target_modules",
                [
                    "encoder.layer",
                    "blocks",
                    "transformer.h",
                ],
            ),
        }

        # Hand the prompt-specific setup to the unified manager.
        super().__init__(model, factory, detection_config)

        # Keep a handle on the adapter for downstream use.
        self.prompt_adapter = adapter

    @staticmethod
    def _build_default_factory(
        adapter: PromptAdapterInterface,
        layer_indices: Optional[Dict[str, int]],
        config: Optional[Union[Dict[str, Any], PromptConfig]],
    ) -> "PromptHookFactory":
        """Create a PromptHookFactory from the adapter and raw config."""
        from continuallearning.models.pefts.common.config_factory import (
            create_peft_config,
        )

        # Normalize whatever the caller passed into a proper prompt config.
        if config is None:
            prompt_config = create_peft_config("prompt", {})
        elif isinstance(config, dict):
            prompt_config = create_peft_config("prompt", config)
        elif isinstance(config, PromptConfig):
            prompt_config = config
        else:
            # Other BasePEFTConfig types are converted via their dict form.
            prompt_config = create_peft_config("prompt", config.to_dict())

        # The current task is read straight off the adapter.
        current_task = getattr(adapter, "current_task", 0)

        return PromptHookFactory(
            task_id=current_task, layer_indices=layer_indices, config=prompt_config
        )

    def get_trainable_parameters(self) -> List[nn.Parameter]:
        """
        Collect all prompt parameters from attached hooks.

        For prompt-based methods, hooks own their parameters, so collection
        walks every attached hook rather than querying the adapter.

        Returns:
            List[nn.Parameter]: List of trainable prompt parameters
        """
        params: List[nn.Parameter] = []

        hooks = getattr(self, "hooks", None)
        if hooks:
            for hook in hooks.values():
                if not hasattr(hook, "get_parameters"):
                    continue
                found = hook.get_parameters()
                if found:
                    params.extend(found)

        # An empty result usually means attach() was never called.
        if not params:
            logger.warning(
                "No parameters found in hooks. Make sure hooks are properly attached."
            )

        return params


# Helper function to simplify prompt application to models
def apply_prompts_to_transformer(
    model: nn.Module,
    prompt_adapter: PromptAdapterInterface,
    config: Optional[Dict[str, Any]] = None,
) -> PromptHookManager:
    """
    Convenience wrapper that wires prompts into a transformer model.

    Builds a PromptHookFactory seeded with the adapter's current task,
    constructs a PromptHookManager around it, attaches the hooks, and returns
    the manager.

    Args:
        model: Transformer model to apply prompts to
        prompt_adapter: Adapter providing the prompts
        config: Optional prompt configuration forwarded to the hook factory

    Returns:
        PromptHookManager: The hook manager for the prompts
    """
    # Seed the factory with the adapter's current task (0 when absent).
    current_task = getattr(prompt_adapter, "current_task", 0)
    factory = PromptHookFactory(task_id=current_task, config=config)

    manager = PromptHookManager(model, prompt_adapter, factory=factory)
    manager.attach()
    return manager
