"""
DualPrompt hooks implementation for parameter-efficient fine-tuning.

This module provides hooks for implementing the DualPrompt method, which combines
general (G) prompts shared across tasks with expert (E) prompts that are task-specific.
"""

from typing import Dict, List, Optional, Any, Union, Tuple

import torch
import torch.nn as nn

from continuallearning.models.pefts.hooks._base_hook import BaseHook, HookType
from continuallearning.models.pefts.hooks.hook_manager.hook_manager import (
    UnifiedHookManager,
)
from continuallearning.models.pefts.common.config import (
    DualPromptConfig,
)
from continuallearning.interfaces.models.hook_interfaces import HookFactoryInterface
from continuallearning.interfaces.models.prompt_interfaces import PromptPosition
from continuallearning.registry import HOOK_FACTORY_REGISTRY, HOOK_MANAGER_REGISTRY
from continuallearning.utils.logging import get_logger

logger = get_logger(__name__)


class DualPromptLayerHook(BaseHook):
    """
    Hook for implementing DualPrompt functionality.

    This hook manages both general (G) prompts, shared across tasks, and
    expert (E) prompts, which are task-specific, and applies them to
    transformer layer inputs. Prompts are only injected at the first layer
    (``layer_idx == 0``); for all other layers the hook is a pass-through.

    Args:
        module: Transformer layer to hook
        config: DualPrompt configuration
    """

    def __init__(
        self,
        module: nn.Module,
        config: DualPromptConfig,
    ):
        super().__init__(module=module, hook_type=HookType.FORWARD_REPLACE)

        # Store configuration
        self.config = config

        # Store layer index (defaults to 0 when the config does not carry one)
        self.layer_idx = getattr(config, "layer_idx", 0)

        # Prompt dimensions and bookkeeping taken from the config
        self.hidden_size = config.token_dim
        self.g_prompt_length = config.g_prompt_length
        self.e_prompt_length = config.e_prompt_length
        self.num_tasks = config.num_tasks
        self.use_prefix_tuning = config.use_prefix_tuning
        self.prompt_position = getattr(
            config, "prompt_position", PromptPosition.PREPEND
        )
        self.shared_prompt_pool = config.shared_prompt_pool

        # Default to task 0 until explicitly set (e.g. via the hook manager)
        self.current_task_id = 0

        # Initialize prompts
        self._initialize_prompt_parameters()

    def _initialize_prompt_parameters(self) -> None:
        """Initialize G/E prompt parameters based on configuration.

        No parameters are created when ``use_prefix_tuning`` is True: the
        prefix variant is implemented by ``PrefixDualPromptLayerHook``, and
        this hook then owns no trainable state (see ``get_parameters``).
        """
        prompt_init = getattr(self.config, "prompt_init", "random")

        if self.use_prefix_tuning:
            # Prefix tuning parameters are handled by the dedicated
            # PrefixDualPromptLayerHook; nothing to initialize here.
            return

        # G-Prompts (shared across tasks)
        if self.shared_prompt_pool:
            # Single set of prompts shared across all layers
            g_prompt_shape = (self.g_prompt_length, self.hidden_size)
        else:
            # Layer-specific prompts
            g_prompt_shape = (1, self.g_prompt_length, self.hidden_size)

        # Initialize G-Prompts ("zero" init, or small random values otherwise)
        if prompt_init == "zero":
            self.g_prompts = nn.Parameter(torch.zeros(*g_prompt_shape))
        else:  # random init
            self.g_prompts = nn.Parameter(torch.randn(*g_prompt_shape) * 0.02)

        # E-Prompts (task-specific)
        if self.shared_prompt_pool:
            # Shape: [num_tasks, e_prompt_length, hidden_size]
            e_prompt_shape = (
                self.num_tasks,
                self.e_prompt_length,
                self.hidden_size,
            )
        else:
            # Shape: [num_tasks, 1, e_prompt_length, hidden_size]
            e_prompt_shape = (
                self.num_tasks,
                1,
                self.e_prompt_length,
                self.hidden_size,
            )

        if prompt_init == "zero":
            self.e_prompts = nn.Parameter(torch.zeros(*e_prompt_shape))
        else:  # random init
            self.e_prompts = nn.Parameter(torch.randn(*e_prompt_shape) * 0.02)

    def _hook_forward(
        self, hidden_states: torch.Tensor, *args, **kwargs
    ) -> torch.Tensor:
        """
        Apply dual prompts to hidden states and call the original forward.

        Args:
            hidden_states: Input hidden states [batch_size, seq_len, hidden_dim]
            *args: Additional positional arguments
            **kwargs: Additional keyword arguments

        Returns:
            torch.Tensor: Output of the wrapped layer, with G+E prompts
                prepended/appended to its input at layer 0.

        Raises:
            RuntimeError: If the hook is not attached to a module.
        """
        if not self.is_active or self._original_forward is None:
            raise RuntimeError(
                "Hook not properly attached - no original forward method available"
            )

        try:
            # Only apply prompts at the first layer (layer_idx=0)
            # This is the typical behavior for most prompt-based methods
            if self.layer_idx != 0:
                return self._original_forward(hidden_states, *args, **kwargs)

            # Don't apply prompts if using prefix tuning
            if self.use_prefix_tuning:
                return self._original_forward(hidden_states, *args, **kwargs)

            # Get batch size for prompt expansion
            batch_size = hidden_states.shape[0]

            # Get G-Prompt (first layer's set when not using a shared pool)
            if self.shared_prompt_pool:
                g_prompt = self.g_prompts
            else:
                g_prompt = self.g_prompts[0]  # Use first layer's prompts

            # Add batch dimension if needed and expand
            if g_prompt.dim() < 3:
                g_prompt = g_prompt.unsqueeze(0).expand(batch_size, -1, -1)

            # Get E-Prompt for current task (wrap task id into valid range)
            task_id = self.current_task_id % self.num_tasks
            if self.shared_prompt_pool:
                e_prompt = self.e_prompts[task_id]
            else:
                e_prompt = self.e_prompts[task_id, 0]  # Use first layer's prompts

            # Add batch dimension if needed and expand
            if e_prompt.dim() < 3:
                e_prompt = e_prompt.unsqueeze(0).expand(batch_size, -1, -1)

            # Combine prompts based on position
            if self.prompt_position == PromptPosition.PREPEND:
                # Concatenate prompts and then prepend to hidden states
                prompts = torch.cat([g_prompt, e_prompt], dim=1)
                new_hidden_states = torch.cat([prompts, hidden_states], dim=1)
            elif self.prompt_position == PromptPosition.APPEND:
                # Concatenate prompts and then append to hidden states
                prompts = torch.cat([g_prompt, e_prompt], dim=1)
                new_hidden_states = torch.cat([hidden_states, prompts], dim=1)
            else:
                # For unsupported positions, return unchanged
                logger.warning(f"Unsupported prompt position: {self.prompt_position}")
                new_hidden_states = hidden_states

            # Call original forward with modified hidden states
            return self._original_forward(new_hidden_states, *args, **kwargs)

        except Exception as e:
            # Deliberate best-effort: never let a prompt bug take the model down
            logger.error(f"Error in dual prompt hook forward: {str(e)}")
            logger.warning("Falling back to original forward method")
            return self._original_forward(hidden_states, *args, **kwargs)

    def get_parameters(self) -> List[nn.Parameter]:
        """Get all trainable prompt parameters.

        Returns:
            List[nn.Parameter]: G and E prompt parameters, or an empty list
                when prefix tuning is enabled (this hook then owns no
                parameters; see ``_initialize_prompt_parameters``).
        """
        if self.use_prefix_tuning:
            # Fix: previously this raised AttributeError because the prompt
            # attributes are never created in the prefix-tuning configuration.
            return []
        return [self.g_prompts, self.e_prompts]


class PrefixDualPromptLayerHook(BaseHook):
    """
    Hook for implementing DualPrompt with prefix tuning.

    This hook manages both general (G) and expert (E) prefixes and applies
    them to transformer attention layers by injecting learned key/value
    prefix vectors.

    Args:
        module: Transformer attention module to hook
        config: DualPrompt configuration

    Raises:
        ValueError: If ``config.token_dim`` is not divisible by
            ``config.num_attention_heads``.
        HookAttachError: If no supported attention mechanism can be detected
            on ``module``.
    """

    def __init__(
        self,
        module: nn.Module,
        config: DualPromptConfig,
    ):
        super().__init__(module=module, hook_type=HookType.FORWARD_REPLACE)

        # Store configuration
        self.config = config

        # Store layer index
        self.layer_idx = getattr(config, "layer_idx", 0)

        # Initialize prefix parameters
        self.hidden_size = config.token_dim
        self.num_attention_heads = config.num_attention_heads
        # Fail fast on an invalid head split; otherwise the integer division
        # silently truncates and reshapes in get_dual_prefix_vectors break
        # much later with an opaque size mismatch.
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                f"token_dim ({self.hidden_size}) must be divisible by "
                f"num_attention_heads ({self.num_attention_heads})"
            )
        self.head_dim = self.hidden_size // self.num_attention_heads
        self.g_prompt_length = config.g_prompt_length
        self.e_prompt_length = config.e_prompt_length
        self.num_tasks = config.num_tasks

        # Initialize prefix-specific parameters
        self.use_prefix_mlp = config.use_prefix_mlp
        self.prefix_mlp_hidden_size = config.prefix_mlp_hidden_size
        self.prefix_dropout = config.dropout

        # Default to task 0 until explicitly set
        self.current_task_id = 0

        # Initialize prefix parameters
        self._initialize_prefix_parameters()

        # Import here to avoid circular imports
        from continuallearning.models.pefts.prompt.hooks.attention_mechanisms import (
            AttentionMechanism,
        )

        # Detect attention mechanism; re-raise as a hook-attach failure so the
        # manager can report which module could not be wrapped.
        try:
            self.attention_mechanism = AttentionMechanism.create(module)
        except ValueError as e:
            from continuallearning.models.pefts.hooks._base_hook import HookAttachError

            raise HookAttachError(f"Cannot attach dual prefix hook: {str(e)}")

    def _initialize_prefix_parameters(self) -> None:
        """Initialize prefix parameters.

        Two layouts exist:
        - MLP reparameterization (``use_prefix_mlp``): small input tokens are
          mapped through a two-layer MLP to produce key/value prefixes.
        - Direct: key/value prefix tokens are learned directly.
        """
        # Calculate total prefix dimension (equals hidden_size)
        total_prefix_dim = self.num_attention_heads * self.head_dim

        # Create prefix parameters based on whether we use MLP
        if self.use_prefix_mlp:
            # G-Prompts prefix parameters
            self.g_prefix_input_tokens = nn.Parameter(
                torch.randn(
                    1,  # single general prompt set
                    2,  # key and value
                    self.g_prompt_length,
                    total_prefix_dim,
                )
            )

            # MLP for G-Prompts prefix generation
            self.g_prefix_mlp = nn.Sequential(
                nn.Linear(total_prefix_dim, self.prefix_mlp_hidden_size),
                nn.Tanh(),
                nn.Linear(self.prefix_mlp_hidden_size, 2 * total_prefix_dim),
            )

            # E-Prompts prefix parameters
            self.e_prefix_input_tokens = nn.Parameter(
                torch.randn(
                    self.num_tasks,
                    2,  # key and value
                    self.e_prompt_length,
                    total_prefix_dim,
                )
            )

            # MLP for E-Prompts prefix generation
            self.e_prefix_mlp = nn.Sequential(
                nn.Linear(total_prefix_dim, self.prefix_mlp_hidden_size),
                nn.Tanh(),
                nn.Linear(self.prefix_mlp_hidden_size, 2 * total_prefix_dim),
            )

            # Dropout for prefix vectors
            self.prefix_dropout_layer = nn.Dropout(self.prefix_dropout)

            # Initialize weights
            self._init_weights()
        else:
            # Direct prefix parameters (no MLP)
            # G-Prompts prefix
            self.g_prefix_tokens = nn.Parameter(
                torch.randn(
                    1,  # single general prompt set
                    2,  # key and value
                    self.g_prompt_length,
                    self.num_attention_heads,
                    self.head_dim,
                )
            )

            # E-Prompts prefix
            self.e_prefix_tokens = nn.Parameter(
                torch.randn(
                    self.num_tasks,
                    2,  # key and value
                    self.e_prompt_length,
                    self.num_attention_heads,
                    self.head_dim,
                )
            )

            # Initialize with small values
            nn.init.uniform_(self.g_prefix_tokens, -0.1, 0.1)
            nn.init.uniform_(self.e_prefix_tokens, -0.1, 0.1)

    def _init_weights(self) -> None:
        """Initialize the weights of MLP modules (normal std=0.02, zero bias)."""
        for mlp in [self.g_prefix_mlp, self.e_prefix_mlp]:
            for module in mlp.modules():
                if isinstance(module, nn.Linear):
                    module.weight.data.normal_(mean=0.0, std=0.02)
                    if module.bias is not None:
                        module.bias.data.zero_()

    def get_dual_prefix_vectors(
        self, batch_size: int, task_id: Optional[int] = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Get combined G and E prefix vectors.

        Args:
            batch_size: Batch size for the current input
            task_id: Optional task ID (defaults to current task)

        Returns:
            Tuple[torch.Tensor, torch.Tensor]: Combined key and value prefix
                vectors with shape
                [batch_size, g+e_prompt_length, num_heads, head_dim].
                On internal error, zero tensors of the same shape are returned
                (best-effort fallback).
        """
        # Use the current task ID if none provided
        if task_id is None:
            task_id = self.current_task_id

        # Ensure valid task ID
        task_id = task_id % self.num_tasks

        try:
            if self.use_prefix_mlp:
                # Process G-prefix through MLP
                g_input_tokens = self.g_prefix_input_tokens
                g_flat_input = g_input_tokens.view(1, -1)
                g_prefixes = self.g_prefix_mlp(g_flat_input)
                g_prefixes = self.prefix_dropout_layer(g_prefixes)

                # Reshape to [1, 2, g_prompt_length, num_heads, head_dim]
                g_prefixes = g_prefixes.view(
                    1,
                    2,
                    self.g_prompt_length,
                    self.num_attention_heads,
                    self.head_dim,
                )

                # Extract key and value prefixes
                g_key_prefix = g_prefixes[
                    0, 0
                ]  # [g_prompt_length, num_heads, head_dim]
                g_value_prefix = g_prefixes[0, 1]

                # Process E-prefix through MLP (slice keeps the leading dim)
                e_input_tokens = self.e_prefix_input_tokens[task_id : task_id + 1]
                e_flat_input = e_input_tokens.view(1, -1)
                e_prefixes = self.e_prefix_mlp(e_flat_input)
                e_prefixes = self.prefix_dropout_layer(e_prefixes)

                # Reshape to [1, 2, e_prompt_length, num_heads, head_dim]
                e_prefixes = e_prefixes.view(
                    1,
                    2,
                    self.e_prompt_length,
                    self.num_attention_heads,
                    self.head_dim,
                )

                # Extract key and value prefixes
                e_key_prefix = e_prefixes[
                    0, 0
                ]  # [e_prompt_length, num_heads, head_dim]
                e_value_prefix = e_prefixes[0, 1]

            else:
                # Directly use learned prefix tokens
                g_key_prefix = self.g_prefix_tokens[
                    0, 0
                ]  # [g_prompt_length, num_heads, head_dim]
                g_value_prefix = self.g_prefix_tokens[0, 1]

                e_key_prefix = self.e_prefix_tokens[
                    task_id, 0
                ]  # [e_prompt_length, num_heads, head_dim]
                e_value_prefix = self.e_prefix_tokens[task_id, 1]

            # Combine G-prefix and E-prefix
            # Concatenate along the prefix length dimension
            key_prefix = torch.cat(
                [g_key_prefix, e_key_prefix], dim=0
            )  # [g_len+e_len, num_heads, head_dim]
            value_prefix = torch.cat([g_value_prefix, e_value_prefix], dim=0)

            # Expand to batch size
            # [batch_size, prefix_length, num_heads, head_dim]
            key_prefix = key_prefix.unsqueeze(0).expand(batch_size, -1, -1, -1)
            value_prefix = value_prefix.unsqueeze(0).expand(batch_size, -1, -1, -1)

            return key_prefix, value_prefix

        except Exception as e:
            logger.error(f"Error getting dual prefix vectors: {str(e)}")
            # Return zero prefixes on the same device/dtype as the learned
            # parameters; a plain torch.zeros() would land on CPU/float32 and
            # crash downstream concatenation on GPU or half-precision models.
            ref_param = (
                self.g_prefix_input_tokens
                if self.use_prefix_mlp
                else self.g_prefix_tokens
            )
            empty_shape = (
                batch_size,
                self.g_prompt_length + self.e_prompt_length,
                self.num_attention_heads,
                self.head_dim,
            )
            key_fallback = torch.zeros(
                empty_shape, device=ref_param.device, dtype=ref_param.dtype
            )
            return key_fallback, key_fallback.clone()

    def _extract_hidden_states(self, args, kwargs):
        """
        Extract hidden states from function arguments.

        Args:
            args: Positional arguments to the forward method
            kwargs: Keyword arguments to the forward method

        Returns:
            torch.Tensor: The input hidden states tensor or None if not found
        """
        # Try different common keyword argument names
        for key in ["hidden_states", "input", "inputs", "x", "features", "states"]:
            if key in kwargs and isinstance(kwargs[key], torch.Tensor):
                return kwargs[key]

        # No matching keywords, try positional arguments
        if len(args) > 0:
            # Most common case: first argument is hidden states
            if isinstance(args[0], torch.Tensor):
                return args[0]
            # Handle tuple or list of tensors (common in some models)
            elif isinstance(args[0], (tuple, list)) and len(args[0]) > 0:
                if isinstance(args[0][0], torch.Tensor):
                    return args[0][0]

        # Special case: check for 'attention_inputs' pattern in some models
        if "attention_inputs" in kwargs and isinstance(
            kwargs["attention_inputs"], tuple
        ):
            for item in kwargs["attention_inputs"]:
                if isinstance(item, torch.Tensor):
                    return item

        # Could not find hidden states
        logger.warning(
            f"Could not extract hidden states from args={type(args)} kwargs={list(kwargs.keys())}"
        )
        return None

    def _hook_forward(self, *args, **kwargs) -> torch.Tensor:
        """
        Apply dual prefix-tuning to transformer attention.

        Args:
            *args: Positional arguments to the original forward method
            **kwargs: Keyword arguments to the original forward method

        Returns:
            torch.Tensor: Output with dual prefix-tuning applied

        Raises:
            RuntimeError: If the hook is not attached to a module.
        """
        if not self.is_active or self._original_forward is None:
            raise RuntimeError(
                "Hook not properly attached - no original forward method available"
            )

        try:
            # Get hidden states from arguments
            hidden_states = self._extract_hidden_states(args, kwargs)
            if hidden_states is None:
                logger.warning("No hidden states found in input, using original method")
                return self._original_forward(*args, **kwargs)

            # Get batch size for prefix expansion
            batch_size = hidden_states.size(0)

            # Get prefix vectors for this layer
            key_prefix, value_prefix = self.get_dual_prefix_vectors(batch_size)

            # Get attention mask if provided
            attention_mask = kwargs.get("attention_mask", None)

            # Apply prefix using the appropriate attention mechanism
            return self.attention_mechanism.apply_prefix(
                hidden_states=hidden_states,
                key_prefix=key_prefix,
                value_prefix=value_prefix,
                attention_mask=attention_mask,
                **kwargs,
            )

        except Exception as e:
            # Deliberate best-effort: fall back to the unmodified layer
            logger.error(f"Error in dual prefix hook: {str(e)}")
            logger.warning("Falling back to original forward method")
            return self._original_forward(*args, **kwargs)

    def get_parameters(self) -> List[nn.Parameter]:
        """Get all trainable parameters for the hook."""
        if self.use_prefix_mlp:
            return [
                self.g_prefix_input_tokens,
                *self.g_prefix_mlp.parameters(),
                self.e_prefix_input_tokens,
                *self.e_prefix_mlp.parameters(),
            ]
        else:
            return [self.g_prefix_tokens, self.e_prefix_tokens]


@HOOK_FACTORY_REGISTRY.register("dual_prompt")
class DualPromptHookFactory(HookFactoryInterface):
    """
    Factory for creating dual prompt hooks.

    Produces either a standard :class:`DualPromptLayerHook` or a
    :class:`PrefixDualPromptLayerHook`, depending on the configuration and
    on whether the target module looks like an attention module.

    Args:
        config: DualPrompt configuration
        layer_indices: Mapping from module paths to layer indices
    """

    def __init__(
        self,
        config: Optional[Union[Dict[str, Any], DualPromptConfig]] = None,
        layer_indices: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        """Initialize dual prompt hook factory."""
        super().__init__(**kwargs)

        # Mapping of module path -> layer index (empty when not provided)
        self.layer_indices = layer_indices if layer_indices else {}

        # Resolve the configuration: dicts and None go through the config
        # factory; a ready-made config object is used as-is.
        from continuallearning.models.pefts.common.config_factory import (
            create_peft_config,
        )

        if isinstance(config, dict):
            self._config = create_peft_config("dual_prompt", config)
        elif config is None:
            self._config = create_peft_config("dual_prompt", kwargs)
        else:
            self._config = config

    def __call__(self, module_path: str, module: nn.Module) -> BaseHook:
        """
        Create a dual prompt hook for a specific module.

        Args:
            module_path: Path to the module in the model
            module: Module to hook

        Returns:
            BaseHook: Appropriate hook instance for this module
        """
        # Build a per-layer config carrying this module's layer index
        config_dict = self._config.to_dict()
        config_dict["layer_idx"] = self.layer_indices.get(module_path, 0)

        from continuallearning.models.pefts.common.config_factory import (
            create_peft_config,
        )

        layer_config = create_peft_config("dual_prompt", config_dict)

        # Heuristic attention-module detection: look for the common
        # query/key/value attribute naming schemes used by transformer impls.
        has_bert_style = all(
            hasattr(module, name) for name in ("query", "key", "value")
        )
        has_proj_style = all(
            hasattr(module, name) for name in ("q_proj", "k_proj", "v_proj")
        )
        is_attention_module = (
            has_bert_style
            or has_proj_style
            or hasattr(module, "query_key_value")
            or hasattr(module, "c_attn")
        )

        # Prefix hooks only make sense on attention modules; everything else
        # gets the standard prompt hook.
        if layer_config.use_prefix_tuning and is_attention_module:
            hook_cls = PrefixDualPromptLayerHook
        else:
            hook_cls = DualPromptLayerHook
        return hook_cls(module=module, config=layer_config)

    @property
    def config(self) -> DualPromptConfig:
        """Get the dual prompt configuration."""
        return self._config


@HOOK_MANAGER_REGISTRY.register("dual_prompt")
class DualPromptHookManager(UnifiedHookManager):
    """
    Manager for dual prompt hooks across a model.

    Args:
        model: Model to apply dual prompt to
        config: Configuration for hook creation and detection
        layer_indices: Mapping from module paths to layer indices
    """

    def __init__(
        self,
        model: nn.Module,
        config: Optional[Union[Dict[str, Any], DualPromptConfig]] = None,
        layer_indices: Optional[Dict[str, int]] = None,
    ):
        """Initialize dual prompt hook manager."""
        from continuallearning.models.pefts.common.config_factory import (
            create_peft_config,
        )

        # Resolve the configuration into a DualPromptConfig instance
        if isinstance(config, dict):
            resolved_config = create_peft_config("dual_prompt", config)
        elif config is None:
            resolved_config = create_peft_config("dual_prompt", {})
        else:
            resolved_config = config

        # Factory that produces the per-module hooks
        hook_factory = DualPromptHookFactory(
            config=resolved_config, layer_indices=layer_indices
        )

        # Module-detection settings: look for transformer layer containers
        # and attention submodules by name pattern.
        detection_config = {
            "find_transformer_layers": True,
            "target_patterns": [
                "encoder.layer",
                "blocks",
                "transformer.h",
                "attention",
                "attn",
            ],
        }

        super().__init__(model, hook_factory, detection_config)

    def set_task(self, task_id: int) -> None:
        """
        Set the current task for all hooks.

        Args:
            task_id: Task identifier
        """
        dual_prompt_hook_types = (DualPromptLayerHook, PrefixDualPromptLayerHook)
        for hook in self._hooks.values():
            if isinstance(hook, dual_prompt_hook_types):
                hook.current_task_id = task_id
