"""
Advanced Learning to Prompt for Continual Learning (L2P-Advanced) implementation.

This module implements an enhanced version of L2P method from:
"Learning To Prompt for Continual Learning" (Wang et al., CVPR 2022)

The enhancement allows for using different prompt pools at different layers,
rather than only applying prompts at the first layer.
"""

from typing import Dict, List, Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F

from continuallearning.interfaces.models.adapter import AdapterType
from continuallearning.interfaces.types import AdapterOutput
from continuallearning.registry import HOOK_ADAPTER_REGISTRY
from continuallearning.interfaces.models.prompt_interfaces import PromptPosition
from continuallearning.models.pefts.prompt.modules.base_prompt_adapter import (
    BasePromptAdapter,
)
from continuallearning.models.backbones.base import BaseBackbone

# Use the project's custom logging system instead of the standard logging library
from continuallearning.utils.logging import get_logger

# Create a logger via the custom logging system
logger = get_logger(__name__)


@HOOK_ADAPTER_REGISTRY.register()
class L2PAdvancedAdapter(BasePromptAdapter):
    """
    Advanced Learning to Prompt (L2P) adapter implementation.

    This enhanced version of L2P supports:
    1. Layer-specific prompt pools
    2. Applying prompts at multiple layers, not just the first layer
    3. Per-layer prompt selection strategies

    Args:
        backbone: Backbone model to adapt
        hidden_size: Dimensionality of the model
        num_layers: Number of layers in the backbone model
        pool_size: Size of the prompt pool (default: 100)
        num_tokens: Number of tokens per prompt (default: 5)
        top_k: Number of prompts to select (default: 5)
        initialization: Initialization method for prompts (default: "random")
        key_initialization: Initialization method for prompt keys (default: "random")
        target_layers: List of layer indices to apply prompts (default: None, applies to first layer only)
        shared_across_layers: Whether to share the prompt pool across layers (default: False)
        per_layer_top_k: Whether to use different top_k for different layers (default: None)
        adapter_type: Type of adapter (default: AdapterType.POST_BACKBONE)
    """

    def __init__(
        self,
        backbone: BaseBackbone,
        hidden_size: int,
        num_layers: int,
        pool_size: int = 100,
        num_tokens: int = 5,
        top_k: int = 5,
        initialization: str = "random",
        key_initialization: str = "random",
        target_layers: Optional[List[int]] = None,
        shared_across_layers: bool = False,
        per_layer_top_k: Optional[Dict[int, int]] = None,
        adapter_type: AdapterType = AdapterType.POST_BACKBONE,
    ):
        """
        Initialize the L2P Advanced adapter.

        Args:
            backbone: Backbone model to adapt
            hidden_size: Dimensionality of the model
            num_layers: Number of layers in the backbone model
            pool_size: Size of the prompt pool
            num_tokens: Number of tokens per prompt
            top_k: Number of prompts to select
            initialization: Initialization method for prompts
            key_initialization: Initialization method for prompt keys
            target_layers: List of layer indices to apply prompts (defaults to [0])
            shared_across_layers: Whether to share the prompt pool across layers
            per_layer_top_k: Dictionary mapping layer indices to their top_k values
            adapter_type: Type of adapter
        """
        # Initialize with BasePromptAdapter
        super().__init__(
            backbone=backbone,
            hidden_size=hidden_size,
            num_layers=num_layers,
            num_prompts=num_tokens,
            prompt_position=PromptPosition.PREPEND,
            adapter_type=adapter_type,
        )

        self.pool_size = pool_size
        self.prompt_length = num_tokens  # For backward compatibility
        self.prompt_layers = target_layers or [0]  # Default to first layer only
        self.shared_prompt_pool = shared_across_layers

        # Per-layer top_k values. Every value is clamped to pool_size so that
        # torch.topk is never asked for more prompts than exist in the pool.
        default_top_k = min(top_k, pool_size)
        overrides = per_layer_top_k or {}
        self.top_k = {
            layer_idx: min(overrides.get(layer_idx, default_top_k), pool_size)
            for layer_idx in self.prompt_layers
        }

        # Initialize prompt pools and keys
        if shared_across_layers:
            # A single pool and key set serve every target layer
            self.prompt_pool = self._initialize_prompt_pool(initialization)
            self.prompt_keys = self._initialize_prompt_keys(key_initialization)
        else:
            # Independent pool/keys per layer. nn.ParameterDict requires
            # string keys, so layer indices are stored as str(layer_idx).
            self.prompt_pools = nn.ParameterDict()
            self.prompt_keys_dict = nn.ParameterDict()

            for layer_idx in self.prompt_layers:
                layer_str = str(layer_idx)  # Convert to string for ParameterDict keys
                self.prompt_pools[layer_str] = self._initialize_prompt_pool(
                    initialization, layer_idx
                )
                self.prompt_keys_dict[layer_str] = self._initialize_prompt_keys(
                    key_initialization, layer_idx
                )

        # Cache for query features and selected prompt indices during forward pass
        self._cached_features: Optional[torch.Tensor] = None
        self._last_prompt_indices: Dict[str, torch.Tensor] = {}  # keyed by str(layer_idx)

    @property
    def num_prompts(self) -> int:
        """Get the number of prompt tokens used."""
        return self.prompt_length

    @property
    def last_prompt_indices(self) -> Dict[int, torch.Tensor]:
        """Get the indices of the last selected prompts for each layer.

        Internally indices are stored under str(layer_idx); this converts the
        keys back to ints for callers.
        """
        return {int(k): v for k, v in self._last_prompt_indices.items()}

    def _initialize_prompt_pool(
        self, init_method: str, layer_idx: Optional[int] = None
    ) -> nn.Parameter:
        """
        Initialize a prompt pool for a specific layer or shared across layers.

        Args:
            init_method: Initialization method for prompts ("zero" or "random")
            layer_idx: Optional layer index for layer-specific initialization
                (currently unused; reserved for layer-dependent init schemes)

        Returns:
            nn.Parameter: Initialized prompt pool parameter of shape
                [pool_size, prompt_length, hidden_size]

        Raises:
            ValueError: If ``init_method`` is not "zero" or "random".
        """
        if init_method == "zero":
            prompt_pool = torch.zeros(
                self.pool_size, self.prompt_length, self._hidden_size
            )
        elif init_method == "random":
            prompt_pool = torch.randn(
                self.pool_size, self.prompt_length, self._hidden_size
            )
            # Scale for stable initialization
            prompt_pool = prompt_pool * 0.02
        else:
            raise ValueError(f"Unknown prompt initialization method: {init_method}")

        return nn.Parameter(prompt_pool)

    def _initialize_prompt_keys(
        self, init_method: str, layer_idx: Optional[int] = None
    ) -> nn.Parameter:
        """
        Initialize prompt keys for a specific layer or shared across layers.

        Args:
            init_method: Initialization method for keys ("zero" or "random")
            layer_idx: Optional layer index for layer-specific initialization
                (currently unused; reserved for layer-dependent init schemes)

        Returns:
            nn.Parameter: Initialized prompt keys parameter of shape
                [pool_size, hidden_size]

        Raises:
            ValueError: If ``init_method`` is not "zero" or "random".
        """
        if init_method == "zero":
            prompt_keys = torch.zeros(self.pool_size, self._hidden_size)
        elif init_method == "random":
            prompt_keys = torch.randn(self.pool_size, self._hidden_size)
            prompt_keys = prompt_keys * 0.02
        else:
            raise ValueError(f"Unknown prompt key initialization method: {init_method}")

        return nn.Parameter(prompt_keys)

    def get_prompt_params(self) -> List[nn.Parameter]:
        """
        Get all trainable prompt parameters.

        Returns:
            List[nn.Parameter]: List containing prompt pool and keys parameters for all layers
        """
        params = []

        if self.shared_prompt_pool:
            params.extend([self.prompt_pool, self.prompt_keys])
        else:
            for layer_str in self.prompt_pools:
                params.append(self.prompt_pools[layer_str])
                params.append(self.prompt_keys_dict[layer_str])

        return params

    def get_prompt_pool_for_layer(self, layer_idx: int) -> nn.Parameter:
        """
        Get the prompt pool for a specific layer.

        Args:
            layer_idx: Layer index

        Returns:
            nn.Parameter: Prompt pool for the specified layer (the shared pool
                when ``shared_across_layers`` was enabled)
        """
        if self.shared_prompt_pool:
            return self.prompt_pool
        return self.prompt_pools[str(layer_idx)]

    def get_prompt_keys_for_layer(self, layer_idx: int) -> nn.Parameter:
        """
        Get the prompt keys for a specific layer.

        Args:
            layer_idx: Layer index

        Returns:
            nn.Parameter: Prompt keys for the specified layer (the shared keys
                when ``shared_across_layers`` was enabled)
        """
        if self.shared_prompt_pool:
            return self.prompt_keys
        return self.prompt_keys_dict[str(layer_idx)]

    def _select_prompts_for_layer(
        self, features: torch.Tensor, layer_idx: int, task_id: Optional[int] = None
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Select prompts from the pool for a specific layer based on input features.

        Selection follows the L2P query mechanism: cosine similarity between
        the query features and the learnable prompt keys, top-k selection,
        then a softmax-weighted sum of the selected prompts.

        Args:
            features: Input features [batch_size, hidden_dim]
            layer_idx: Layer index
            task_id: Optional task identifier (currently unused in selection)

        Returns:
            Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
                - Selected prompts [batch_size, prompt_length, hidden_dim]
                - Selection weights [batch_size, top_k]
                - Selected indices [batch_size, top_k]
        """
        # Get layer-specific prompt pool and keys
        prompt_pool = self.get_prompt_pool_for_layer(layer_idx)
        prompt_keys = self.get_prompt_keys_for_layer(layer_idx)

        # Get layer-specific top-k value
        top_k = self.top_k[layer_idx]

        # Cosine similarity between features and prompt keys
        # [batch_size, pool_size]
        similarity = F.normalize(features, dim=1) @ F.normalize(prompt_keys, dim=1).t()

        # Select top-k prompts
        weights, selected_idx = torch.topk(similarity, k=top_k, dim=1)
        weights = F.softmax(weights, dim=1)  # [batch_size, top_k]

        # Store the selected indices for analysis/visualization
        self._last_prompt_indices[str(layer_idx)] = selected_idx

        # Gather selected prompts
        # [batch_size, top_k, prompt_length, hidden_dim]
        selected_prompts = prompt_pool[selected_idx]

        # Apply weights
        # [batch_size, top_k, 1, 1] * [batch_size, top_k, prompt_length, hidden_dim]
        selected_prompts = weights.unsqueeze(-1).unsqueeze(-1) * selected_prompts

        # Sum over top_k dimension
        # [batch_size, prompt_length, hidden_dim]
        selected_prompts = selected_prompts.sum(dim=1)

        return selected_prompts, weights, selected_idx

    def adapt_representation(
        self,
        hidden_states: torch.Tensor,
        task_id: Optional[int] = None,
        layer_idx: Optional[int] = None,
        **kwargs,
    ) -> torch.Tensor:
        """
        Apply adaptation to model representations at a specific layer.

        Args:
            hidden_states: Input hidden states [batch_size, seq_len, hidden_dim]
            task_id: Optional task identifier
            layer_idx: Layer index (0-based)
            **kwargs: Additional adapter-specific arguments

        Returns:
            torch.Tensor: Hidden states with prompts applied (prepended or
                appended, extending seq_len by prompt_length); the input
                unchanged when ``layer_idx`` is None or not a target layer.

        Raises:
            ValueError: If the configured prompt position is unsupported.
        """
        if layer_idx is None:
            logger.warning("Layer index not provided for L2PAdvancedAdapter")
            return hidden_states

        # Only apply prompts to specified layers
        if layer_idx not in self.prompt_layers:
            return hidden_states

        # Prefer the query features cached by forward(); fall back to the
        # first token of the incoming hidden states (CLS-style pooling).
        # NOTE: _cached_features is always set in __init__, so no hasattr
        # guard is needed.
        if self._cached_features is not None:
            features = self._cached_features
        else:
            features = hidden_states[:, 0]
            logger.warning(
                "No cached features available for L2P prompt selection at "
                "layer %s. Using first token from hidden states.",
                layer_idx,
            )

        # Select prompts for this specific layer
        selected_prompts, _, _ = self._select_prompts_for_layer(
            features, layer_idx, task_id
        )

        # Apply prompts based on position
        if self.prompt_position == PromptPosition.PREPEND:
            # Prepend prompts to sequence
            return torch.cat([selected_prompts, hidden_states], dim=1)
        elif self.prompt_position == PromptPosition.APPEND:
            # Append prompts to sequence
            return torch.cat([hidden_states, selected_prompts], dim=1)
        else:
            raise ValueError(f"Unsupported prompt position: {self.prompt_position}")

    def forward(
        self, x: torch.Tensor, task_id: Optional[int] = None, **kwargs
    ) -> AdapterOutput:
        """
        Forward pass with L2P-Advanced adapter.

        This method first extracts features from the input to select prompts,
        then attaches hooks to inject the selected prompts during model forward pass.

        Args:
            x: Input tensor
            task_id: Optional task identifier
            **kwargs: Additional arguments for the backbone

        Returns:
            AdapterOutput: Adapter output with features from prompted model
        """
        # Set the current task if specified
        if task_id is not None:
            self.set_task(task_id)

        # Clear the previous indices
        self._last_prompt_indices = {}

        # First get features from the model for prompt selection.
        # This is a quick, gradient-free forward pass to obtain the query.
        with torch.no_grad():
            if hasattr(self.backbone, "extract_features"):
                # Use extract_features if available (more efficient)
                self._cached_features = self.backbone.extract_features(x, **kwargs)
            else:
                # Fall back to running the full model
                try:
                    outputs = self.backbone(x, **kwargs)
                    if isinstance(outputs, dict) and "features" in outputs:
                        self._cached_features = outputs["features"]
                    elif (
                        hasattr(outputs, "pooler_output")
                        and outputs.pooler_output is not None
                    ):
                        self._cached_features = outputs.pooler_output
                    elif hasattr(outputs, "last_hidden_state"):
                        self._cached_features = outputs.last_hidden_state[:, 0]
                    else:
                        # Fallback: mean over dim 1. NOTE(review): this assumes
                        # x is sequence-shaped [batch, seq, dim]; for image
                        # inputs this averages over channels — confirm callers.
                        self._cached_features = x.mean(dim=1)
                        logger.warning(
                            "Could not extract features from model output, using mean pooling."
                        )
                except Exception as e:
                    # Best-effort: keep the forward pass alive with a crude query
                    logger.error("Error extracting features for L2P-Advanced: %s", e)
                    self._cached_features = x.mean(dim=1)  # Fallback

        # Now do the actual forward pass with hooks that will use the cached features
        self.attach(
            self.backbone.model if hasattr(self.backbone, "model") else self.backbone
        )

        try:
            # Run forward pass with hooks in place
            output = self.backbone(x, **kwargs)

            # Standardize output format
            if isinstance(output, dict) and "features" in output:
                features = output["features"]
            elif isinstance(output, torch.Tensor):
                features = output
            elif hasattr(output, "features"):
                features = output.features
            elif hasattr(output, "last_hidden_state"):
                features = output.last_hidden_state[:, 0]  # Use CLS token
            else:
                logger.warning(
                    "Could not extract features from output, using cached features"
                )
                features = self._cached_features

            return AdapterOutput(features=features, adapter_hidden_states=None)
        except Exception as e:
            logger.error("Error in L2P-Advanced forward pass: %s", e)
            # In case of error, try to return something useful
            return AdapterOutput(features=self._cached_features)
        finally:
            # Always clean up hooks
            self.detach()
            # Clear the feature cache after use (drops the tensor reference;
            # the attribute itself always exists, so no hasattr/del needed)
            self._cached_features = None

    def _create_hook_manager(self, model: nn.Module, task_id: int):
        """
        Create a hook manager for a specific task.

        Args:
            model: Model to apply hooks to
            task_id: Task identifier

        Returns:
            HookManagerInterface: Hook manager for the adapter
        """
        from ..prompt.prompt_hooks import PromptHookManager

        # Create and return PromptHookManager instance with layer-specific config
        return PromptHookManager(
            model=model,
            adapter=self,
            config={
                "num_tokens": self.prompt_length,
                "position": self.prompt_position.name.lower(),
                "target_layers": self.prompt_layers,  # Pass the layers to apply prompts
            },
        )
