"""
Prompt pool hooks for parameter-efficient fine-tuning.

This module provides hooks specifically designed for prompt pool methods
like Learning to Prompt (L2P), following the hook-based architecture pattern.
"""

from typing import Dict, List, Optional, Any, Union, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F

from continuallearning.models.pefts.hooks._base_hook import BaseHook, HookType
from continuallearning.models.pefts.hooks.hook_manager.hook_manager import (
    UnifiedHookManager,
)
from continuallearning.models.pefts.common.config import L2PConfig
from continuallearning.interfaces.models.hook_interfaces import HookFactoryInterface
from continuallearning.interfaces.models.prompt_interfaces import PromptPosition
from continuallearning.registry import HOOK_FACTORY_REGISTRY, HOOK_MANAGER_REGISTRY
from continuallearning.utils.logging import get_logger

logger = get_logger(__name__)


class L2pLayerHook(BaseHook):
    """
    Hook implementing the L2P (Learning to Prompt) prompt-pool mechanism.

    Maintains a pool of learnable prompts plus matching selection keys. On
    each forward pass of the hooked layer (first layer only), the hook picks
    the top-k prompts whose keys are most similar to the input's first-token
    features, forms a softmax-weighted mix of them, and prepends or appends
    the result to the hidden states before calling the original forward.

    Args:
        module: Transformer layer to hook.
        config: L2P configuration (pool size, prompt length, hidden dim,
            top-k, initialization methods, dropout, position).
    """

    def __init__(
        self,
        module: nn.Module,
        config: L2PConfig,
    ):
        super().__init__(module=module, hook_type=HookType.FORWARD_REPLACE)

        # Store configuration for later inspection
        self.config = config

        # Layer index / task id mirror the LoRA hook implementation so the
        # factories can treat both hook families uniformly.
        self.layer_idx = getattr(config, "layer_idx", 0)
        self._task_id = getattr(config, "task_id", 0)

        # Map the string position from the config onto the enum; unknown
        # values fall back to PREPEND (the standard L2P placement).
        position_map = {
            "prepend": PromptPosition.PREPEND,
            "append": PromptPosition.APPEND,
            "deep": PromptPosition.DEEP,
        }
        self.prompt_position = position_map.get(
            config.prompt_position, PromptPosition.PREPEND
        )

        # Pool dimensions; top_k is clamped so selection never asks for more
        # prompts than the pool contains.
        self.pool_size = config.pool_size
        self.prompt_length = config.num_tokens
        self.hidden_size = config.token_dim
        self.top_k = min(config.top_k, config.pool_size)

        # Learnable prompt pool [pool_size, prompt_length, hidden_size] and
        # selection keys [pool_size, hidden_size].
        self.prompt_pool = self._initialize_prompt_pool(config.initialization)
        self.prompt_keys = self._initialize_prompt_keys(config.prompt_key_init)

        # Optional dropout applied to the selected prompts before insertion.
        self.dropout = nn.Dropout(config.dropout) if config.dropout > 0 else None

    def _initialize_prompt_pool(self, init_method: str) -> nn.Parameter:
        """Initialize the prompt pool tensor.

        Args:
            init_method: Either "zero" or "random" (scaled Gaussian).

        Returns:
            nn.Parameter: Pool of shape [pool_size, prompt_length, hidden_size].

        Raises:
            ValueError: If the initialization method is unknown.
        """
        if init_method == "zero":
            prompt_pool = torch.zeros(
                self.pool_size, self.prompt_length, self.hidden_size
            )
        elif init_method == "random":
            prompt_pool = torch.randn(
                self.pool_size, self.prompt_length, self.hidden_size
            )
            # Small scale (0.02), matching common transformer init conventions.
            prompt_pool = prompt_pool * 0.02
        else:
            raise ValueError(f"Unknown prompt initialization method: {init_method}")

        return nn.Parameter(prompt_pool)

    def _initialize_prompt_keys(self, init_method: str) -> nn.Parameter:
        """Initialize the prompt selection keys.

        Args:
            init_method: Either "zero" or "random" (scaled Gaussian).

        Returns:
            nn.Parameter: Keys of shape [pool_size, hidden_size].

        Raises:
            ValueError: If the initialization method is unknown.
        """
        if init_method == "zero":
            prompt_keys = torch.zeros(self.pool_size, self.hidden_size)
        elif init_method == "random":
            prompt_keys = torch.randn(self.pool_size, self.hidden_size)
            prompt_keys = prompt_keys * 0.02
        else:
            raise ValueError(f"Unknown prompt key initialization method: {init_method}")

        return nn.Parameter(prompt_keys)

    def _select_prompts(
        self, features: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Select prompts from the pool based on input features.

        Args:
            features: Input features [batch_size, hidden_dim]

        Returns:
            Tuple[torch.Tensor, torch.Tensor]:
                - Selected prompts [batch_size, prompt_length, hidden_dim]
                - Selection weights [batch_size, top_k]
        """
        # Cosine similarity between features and keys: [batch_size, pool_size]
        similarity = (
            F.normalize(features, dim=1) @ F.normalize(self.prompt_keys, dim=1).t()
        )

        # Top-k most similar prompts; softmax over the top-k similarities
        # gives the mixing weights. [batch_size, top_k]
        weights, selected_idx = torch.topk(similarity, k=self.top_k, dim=1)
        weights = F.softmax(weights, dim=1)

        # Gather: [batch_size, top_k, prompt_length, hidden_dim]
        selected_prompts = self.prompt_pool[selected_idx]

        # Weight and reduce over top_k: [batch_size, prompt_length, hidden_dim]
        selected_prompts = weights.unsqueeze(-1).unsqueeze(-1) * selected_prompts
        selected_prompts = selected_prompts.sum(dim=1)

        return selected_prompts, weights

    def _hook_forward(
        self, hidden_states: torch.Tensor, *args, **kwargs
    ) -> torch.Tensor:
        """
        Apply prompts to hidden states based on prompt pool selection.

        Args:
            hidden_states: Input hidden states [batch_size, seq_len, hidden_dim]

        Returns:
            torch.Tensor: Output of the original forward called on hidden
            states with prompts inserted (layer 0 only; unchanged otherwise).

        Raises:
            RuntimeError: If the hook is inactive or has no original forward.
        """
        if not self.is_active or self._original_forward is None:
            raise RuntimeError(
                "Hook not properly attached - no original forward method available"
            )

        try:
            # Only apply prompts at the first layer (layer_idx=0)
            if self.layer_idx != 0:
                return self._original_forward(hidden_states, *args, **kwargs)

            # Use the first token's features as the selection query
            # (presumably the [CLS] token -- TODO confirm for this model family).
            features = hidden_states[:, 0]  # [batch_size, hidden_dim]

            # Select prompts based on features
            selected_prompts, _ = self._select_prompts(features)

            # Fix: dropout was configured but never applied; regularize the
            # selected prompts before inserting them into the sequence.
            if self.dropout is not None:
                selected_prompts = self.dropout(selected_prompts)

            # Apply prompts based on position
            if self.prompt_position == PromptPosition.PREPEND:
                # Prepend prompts to sequence
                new_hidden_states = torch.cat([selected_prompts, hidden_states], dim=1)
            elif self.prompt_position == PromptPosition.APPEND:
                # Append prompts to sequence
                new_hidden_states = torch.cat([hidden_states, selected_prompts], dim=1)
            else:
                # Deep prompt insertion is not handled by this hook; pass through.
                new_hidden_states = hidden_states

            # Call original forward with modified hidden states
            return self._original_forward(new_hidden_states, *args, **kwargs)

        except Exception as e:
            # Best-effort fallback: log the failure and run the unmodified layer
            # rather than crashing the whole forward pass.
            logger.error(f"Error in prompt pool hook forward: {str(e)}")
            logger.warning("Falling back to original forward method")
            return self._original_forward(hidden_states, *args, **kwargs)

    def get_parameters(self) -> List[nn.Parameter]:
        """Get all trainable prompt parameters (pool and keys)."""
        return [self.prompt_pool, self.prompt_keys]


@HOOK_FACTORY_REGISTRY.register("l2p")
class L2pHookFactory(HookFactoryInterface):
    """
    Factory that builds L2pLayerHook instances for individual modules.

    Each call produces a hook whose configuration is the factory's base L2P
    config specialized with the target module's layer index and the current
    task id.

    Args:
        config: L2P configuration, a plain dict to build one from, or None
            to build one from **kwargs.
        layer_indices: Mapping from module paths to layer indices.
        task_id: Current task identifier (optional).
        **kwargs: Additional configuration parameters.
    """

    def __init__(
        self,
        config: Optional[Union[Dict[str, Any], L2PConfig]] = None,
        layer_indices: Optional[Dict[str, int]] = None,
        task_id: int = 0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.layer_indices = {} if layer_indices is None else layer_indices
        self._task_id = task_id

        # Local import to avoid a circular dependency at module load time.
        from continuallearning.models.pefts.common.config_factory import (
            create_peft_config,
        )

        # Normalize the configuration: None -> build from kwargs,
        # dict -> build from the dict, anything else -> use as-is.
        if config is None:
            self._config = create_peft_config("l2p", kwargs)
        elif isinstance(config, dict):
            self._config = create_peft_config("l2p", config)
        else:
            self._config = config

    def __call__(self, module_path: str, module: nn.Module) -> BaseHook:
        """
        Create a prompt pool hook for a specific module.

        Args:
            module_path: Path to the module in the model
            module: Module to hook

        Returns:
            BaseHook: Hook instance for this module
        """
        # Local import to avoid a circular dependency at module load time.
        from continuallearning.models.pefts.common.config_factory import (
            create_peft_config,
        )

        # Specialize the base config with this module's layer index (0 when
        # the path is unknown) and the factory's task id.
        config_dict = self._config.to_dict()
        config_dict["layer_idx"] = self.layer_indices.get(module_path, 0)
        config_dict["task_id"] = self._task_id

        layer_config = create_peft_config("l2p", config_dict)

        return L2pLayerHook(module=module, config=layer_config)

    @property
    def config(self) -> L2PConfig:
        """The factory's base L2P configuration."""
        return self._config


@HOOK_MANAGER_REGISTRY.register("l2p")
class L2pHookManager(UnifiedHookManager):
    """
    Manager for prompt pool hooks across a model.

    Normalizes the incoming configuration to an L2PConfig, builds an
    L2pHookFactory when none is supplied, and configures transformer-layer
    detection for common architectures before delegating to the unified
    hook manager.

    Args:
        model: Model to apply prompts to
        factory: Hook factory to create hooks (optional)
        config: Configuration for hook creation and detection (optional)
        layer_indices: Mapping from module paths to layer indices (optional)
    """

    def __init__(
        self,
        model: nn.Module,
        factory: Optional[HookFactoryInterface] = None,
        config: Optional[Union[Dict[str, Any], L2PConfig]] = None,
        layer_indices: Optional[Dict[str, int]] = None,
    ):
        # Local import to avoid a circular dependency at module load time.
        from continuallearning.models.pefts.common.config_factory import (
            create_peft_config,
        )

        # Normalize config to an L2PConfig: dicts (or other configs exposing
        # to_dict) are converted; None falls back to a default L2P config.
        if config is not None and not isinstance(config, L2PConfig):
            if isinstance(config, dict):
                l2p_config = create_peft_config("l2p", config)
            else:
                l2p_config = create_peft_config("l2p", config.to_dict())
        else:
            # Here config is either an L2PConfig (used as-is) or None
            # (replaced by a default config).
            l2p_config = config or create_peft_config("l2p", {})

        # Create hook factory if not provided
        if factory is None:
            factory = L2pHookFactory(config=l2p_config, layer_indices=layer_indices)

        # Module-detection patterns covering BERT-style ("encoder.layer"),
        # ViT/timm-style ("blocks"), and GPT-style ("transformer.h") stacks.
        detection_config = {
            "find_transformer_layers": True,
            "target_patterns": [
                "encoder.layer",
                "blocks",
                "transformer.h",
            ],
        }

        # Initialize manager
        super().__init__(model, factory, detection_config)

    def get_trainable_parameters(self) -> List[nn.Parameter]:
        """Collect trainable prompt parameters from all managed hooks.

        Returns:
            List[nn.Parameter]: Parameters from every hook exposing
            get_parameters (prompt pools and keys).
        """
        params: List[nn.Parameter] = []
        # Iterate values only -- the hook path key is not needed here.
        for hook in self.hooks.values():
            if hasattr(hook, "get_parameters"):
                params.extend(hook.get_parameters())
        return params
