"""
DualPrompt implementation for continual learning.

This module implements the DualPrompt method from:
"DualPrompt: Complementary Prompting for Rehearsal-Free Continual Learning" (Wang et al., ECCV 2022)

It provides extensions to also support prefix tuning for enhanced performance.
"""

from typing import List, Optional, Dict, Tuple

import torch
import torch.nn as nn
from continuallearning.interfaces.models.adapter import AdapterType
from continuallearning.interfaces.types import AdapterOutput
from continuallearning.registry import HOOK_ADAPTER_REGISTRY
from continuallearning.interfaces.models.prompt_interfaces import PromptPosition
from continuallearning.interfaces.models.hook_interfaces import HookManagerInterface
from continuallearning.models.pefts.prompt.modules.base_prompt_adapter import (
    BasePromptAdapter,
)
from continuallearning.models.backbones.base import BaseBackbone
from continuallearning.models.pefts.common.config import DualPromptConfig
from continuallearning.models.pefts.common.config_factory import create_peft_config

# Import the project's custom logging utilities
from continuallearning.utils.logging import get_logger

# Module-level logger for this adapter
logger = get_logger(__name__)


@HOOK_ADAPTER_REGISTRY.register()
class DualPromptAdapter(BasePromptAdapter):
    """
    DualPrompt adapter implementation.

    DualPrompt combines general (G) prompts that are shared across all tasks
    with expert (E) prompts that are task-specific, providing a balance between
    task-general and task-specific knowledge.

    This implementation is extended to support prefix tuning, which directly
    modifies key and value matrices in attention layers rather than adding
    tokens to the input sequence.

    In the hook-based architecture this adapter owns no prompt parameters
    itself: it builds a ``DualPromptConfig`` dict, instantiates the hook
    manager registered under ``"dual_prompt"``, and delegates all prompting
    logic (and parameter ownership) to the hooks attached to the backbone.

    Args:
        backbone: Backbone model to adapt
        hidden_size: Dimensionality of the model
        num_layers: Number of layers in the backbone model
        g_prompt_length: Number of tokens in general prompts (default: 5)
        e_prompt_length: Number of tokens in expert prompts (default: 5)
        num_tasks: Number of tasks for expert prompts (default: 10)
        prompt_init: Initialization method for prompts (default: "random")
        preseqlen: Length for prefix sequence if using prefix tuning (default: 5)
        shared_prompt_pool: Whether to share prompts across layers (default: False)
        adaptive_position: Whether to adaptively determine prompt position (default: False)
        use_prefix_tuning: Whether to use prefix tuning instead of input prompting (default: False)
        num_attention_heads: Number of attention heads in model (for prefix tuning) (default: 12)
        use_prefix_mlp: Whether to use MLP for prefix generation (default: True)
        prefix_mlp_hidden_size: Hidden size of prefix MLP (default: 512)
        prefix_dropout: Dropout rate for prefix vectors (default: 0.1)
        adapter_type: Where this adapter plugs into the pipeline
            (default: AdapterType.POST_BACKBONE)
    """

    def __init__(
        self,
        backbone: BaseBackbone,
        hidden_size: int,
        num_layers: int,
        g_prompt_length: int = 5,
        e_prompt_length: int = 5,
        num_tasks: int = 10,
        prompt_init: str = "random",
        preseqlen: int = 5,
        shared_prompt_pool: bool = False,
        adaptive_position: bool = False,
        use_prefix_tuning: bool = False,
        num_attention_heads: int = 12,
        use_prefix_mlp: bool = True,
        prefix_mlp_hidden_size: int = 512,
        prefix_dropout: float = 0.1,
        adapter_type: AdapterType = AdapterType.POST_BACKBONE,
    ):
        # Initialize base class
        # NOTE(review): num_prompts is seeded with g_prompt_length only, yet the
        # read-only `num_prompts` property below reports g + e lengths. If the
        # base __init__ assigns `self.num_prompts`, the property (which has no
        # setter) would shadow/conflict with it — confirm against
        # BasePromptAdapter that it stores this under a different name.
        super().__init__(
            backbone=backbone,
            hidden_size=hidden_size,
            num_layers=num_layers,
            # Use g_prompt_length as the base num_prompts
            num_prompts=g_prompt_length,
            prompt_position=PromptPosition.PREPEND,
            adapter_type=adapter_type,
        )

        # Store basic configuration
        self.g_prompt_length = g_prompt_length
        self.e_prompt_length = e_prompt_length
        self.num_tasks = num_tasks
        # NOTE(review): preseqlen and adaptive_position are stored but never
        # forwarded to the hook config in attach() — confirm they are consumed
        # elsewhere or are dead configuration.
        self.preseqlen = preseqlen
        self.shared_prompt_pool = shared_prompt_pool
        self.adaptive_position = adaptive_position
        self.prompt_init = prompt_init

        # Prefix tuning configuration
        self.use_prefix_tuning = use_prefix_tuning
        self.num_attention_heads = num_attention_heads
        # NOTE(review): integer division silently truncates when hidden_size is
        # not a multiple of num_attention_heads — consider validating upstream.
        self.head_dim = hidden_size // num_attention_heads
        self.use_prefix_mlp = use_prefix_mlp
        self.prefix_dropout = prefix_dropout
        self.prefix_mlp_hidden_size = prefix_mlp_hidden_size

        # Hook management: one hook manager per task id (key may be None if no
        # task has been set), plus the model the hooks were last attached to.
        self._hook_managers: Dict[Optional[int], HookManagerInterface] = {}
        self._attached_model: Optional[nn.Module] = None

        # Deprecation warning - parameters are now managed by hooks
        logger.warning(
            "DualPromptAdapter direct parameter management is deprecated. "
            "Parameters are now managed by hooks in the new architecture."
        )

    @property
    def num_prompts(self) -> int:
        """Get the total number of prompt tokens used (general + expert)."""
        return self.g_prompt_length + self.e_prompt_length

    def adapt_representation(
        self,
        hidden_states: torch.Tensor,
        task_id: Optional[int] = None,
        layer_idx: Optional[int] = None,
        **kwargs,
    ) -> torch.Tensor:
        """
        Apply adaptation to model representations (hidden states).

        In the new hook-based architecture, this is a pass-through method
        as the prompting logic is handled by hooks.

        Args:
            hidden_states: Input hidden states [batch_size, seq_len, hidden_dim]
            task_id: Optional task identifier
            layer_idx: Optional layer index (unused here; kept for interface
                compatibility)
            **kwargs: Additional adapter-specific arguments

        Returns:
            torch.Tensor: Hidden states (pass-through, unmodified)
        """
        # Set the current task if specified
        # NOTE(review): this path calls `prepare_task` while forward() calls
        # `set_task` — confirm against BasePromptAdapter whether these are
        # interchangeable or the divergence is intentional.
        if task_id is not None:
            self.prepare_task(task_id)

        # For dual prompt, adaptation happens in the hooks
        return hidden_states

    def forward(
        self, x: torch.Tensor, task_id: Optional[int] = None, **kwargs
    ) -> AdapterOutput:
        """
        Forward pass with DualPrompt.

        Uses unified Hook mechanism to apply prompts during transformer layer
        computations: hooks are attached before the backbone call and always
        detached afterwards (even on exception).

        Args:
            x: Input tensor
            task_id: Task identifier (optional)
            **kwargs: Additional arguments for the backbone

        Returns:
            AdapterOutput: Output with features adapted by prompts
        """
        # Set the current task if provided
        if task_id is not None:
            self.set_task(task_id)

        # Attach hooks to the model. Prefer the raw inner model when the
        # backbone wraps one (`backbone.model`), otherwise hook the backbone.
        # NOTE(review): attach() constructs a NEW hook manager on every call
        # (see attach below) — verify the manager re-uses/reloads existing
        # prompt parameters per task, otherwise each forward pass would
        # re-initialize them and training could not converge.
        self.attach(
            self.backbone.model if hasattr(self.backbone, "model") else self.backbone
        )

        try:
            # Run the backbone forward pass with hooks in place
            outputs = self.backbone(x, **kwargs)

            # Standardize output format
            if isinstance(outputs, dict) and "features" in outputs:
                features = outputs["features"]
            elif isinstance(outputs, torch.Tensor):
                features = outputs
            else:
                # Use input tensor as fallback so callers always receive a
                # tensor; the warning flags that no adaptation was captured.
                logger.warning(
                    "Could not extract features from output, using input tensor"
                )
                features = x

            return AdapterOutput(
                features=features,
                adapter_hidden_states=None,
            )
        finally:
            # Always clean up hooks
            self.detach()

    def attach(
        self, model: nn.Module, task_id: Optional[int] = None
    ) -> HookManagerInterface:
        """
        Attach DualPrompt hooks to a model.

        Builds a DualPrompt config from this adapter's settings, instantiates
        the hook manager registered as "dual_prompt", points it at the current
        task, and records it in ``self._hook_managers``.

        Args:
            model: Model to attach hooks to
            task_id: Optional task identifier; falls back to the current task

        Returns:
            HookManagerInterface: Hook manager owning the attached hooks
        """
        # Use task_id if provided, otherwise use current task
        task_id = self.current_task if task_id is None else task_id

        # Create mapping from layer pattern to index
        # NOTE(review): left empty here — presumably the hook manager resolves
        # layers itself when given no mapping; confirm in the manager class.
        layer_indices = {}

        # Create configuration dict. Keys follow DualPromptConfig's schema
        # (e.g. hidden_size is passed as "token_dim", prefix_dropout as
        # "dropout").
        config_dict = {
            "token_dim": self.hidden_size,
            "num_layers": self.num_layers,
            "g_prompt_length": self.g_prompt_length,
            "e_prompt_length": self.e_prompt_length,
            "num_tasks": self.num_tasks,
            "prompt_init": self.prompt_init,
            "shared_prompt_pool": self.shared_prompt_pool,
            "use_prefix_tuning": self.use_prefix_tuning,
            "num_attention_heads": self.num_attention_heads,
            "use_prefix_mlp": self.use_prefix_mlp,
            "prefix_mlp_hidden_size": self.prefix_mlp_hidden_size,
            "dropout": self.prefix_dropout,
            "prompt_position": self.prompt_position.value,
        }

        # Create configuration object
        config = create_peft_config("dual_prompt", config_dict)

        # Import hook manager here to avoid circular imports
        from continuallearning.registry import HOOK_MANAGER_REGISTRY

        hook_manager_cls = HOOK_MANAGER_REGISTRY.get("dual_prompt")

        # Create and attach hooks
        # NOTE(review): a fresh manager is built even when one already exists
        # for this task_id (the old entry is overwritten below) — confirm
        # whether parameter state is preserved across re-attachment.
        hook_manager = hook_manager_cls(
            model=model, config=config, layer_indices=layer_indices
        )

        # Set current task in hook manager (optional API on some managers)
        if hasattr(hook_manager, "set_task"):
            hook_manager.set_task(task_id)

        # Store hook manager so get_trainable_parameters() can find it
        self._hook_managers[task_id] = hook_manager
        self._attached_model = model

        return hook_manager

    def detach(self) -> None:
        """
        Detach all hooks from the model.

        Removes every registered hook across all stored hook managers and
        forgets the attached model. The managers themselves are kept in
        ``self._hook_managers`` so their parameters remain reachable via
        get_trainable_parameters() after detaching.
        NOTE(review): because entries are never evicted, the dict grows by one
        manager per task (and keeps them alive) — confirm this retention is
        intentional.
        """
        for manager in self._hook_managers.values():
            for hook in manager.get_hooks().values():
                hook.detach()
        self._attached_model = None

    def get_trainable_parameters(
        self, task_id: Optional[int] = None
    ) -> List[nn.Parameter]:
        """
        Get trainable parameters for the current task.

        Parameters live on the task's hook manager, so attach() must have been
        called for that task at least once; otherwise an empty list is
        returned with a warning.

        Args:
            task_id: Optional task ID. If None, uses current task.

        Returns:
            List[nn.Parameter]: List of trainable parameters
        """
        if task_id is None:
            task_id = self.current_task

        # If we have active hook managers, get parameters from them
        if task_id in self._hook_managers:
            hook_manager = self._hook_managers[task_id]
            return hook_manager.get_trainable_parameters()

        # Fallback: if hooks aren't attached yet, return empty list
        logger.warning(
            "No hook manager available, returning empty parameter list. "
            "Make sure to attach hooks before training."
        )
        return []