"""
L2p-specific hooks for parameter-efficient fine-tuning.

This module provides hooks specifically designed for L2p (Low-Rank Adaptation)
that apply low-rank updates to linear transformations in neural networks.
"""

import math
from typing import Any, Dict, List, Optional, Union

import torch
import torch.nn as nn
import torch.nn.functional as F

from continuallearning.interfaces import CombinerInterface, HookType, RouterInterface
from continuallearning.models.pefts.common.config.config import L2pConfig
from continuallearning.models.pefts.common.config.config_factory import (
    create_peft_config,
)
from continuallearning.models.pefts.common.context import (
    get_backbone_feature,
    get_current_task_ids,
)
from continuallearning.models.pefts.common.utils.error_handlers import (
    handle_peft_errors,
    validate_tensor_operation,
)
from continuallearning.models.pefts.common.utils.exceptions import (
    ForwardPassError,
    HookAttachError,
)
from continuallearning.models.pefts.common.utils.utils import is_linear_module
from continuallearning.models.pefts.hooks._base_hook import (
    BaseHook,
    BaseHookFactory,
    BaseTaskAwareHook,
)
from continuallearning.registry import HOOK_FACTORY_REGISTRY, HOOK_REGISTRY
from continuallearning.utils.logging import get_logger

# from continuallearning.models.pefts.hook_managers.hook_manager import UnifiedHookManager

# Create a logger via the project's custom logging system
logger = get_logger(__name__)


class L2pLayerHook(BaseHook[L2pConfig]):
    """
    Hook implementing L2P (Learning to Prompt) for a single layer.

    L2P prepends a pool of learnable prompt tokens to the layer input,
    runs the original forward pass on the prompted sequence, and strips
    the prompt positions from the output afterwards.

    Args:
        module: Module to adapt; must expose a ``weight`` attribute so the
            prompts can be allocated on the same device.
        hook_config: L2P configuration.
    """

    @handle_peft_errors(error_type="hook", collect_context=True)
    def __init__(
        self,
        *,
        module: nn.Module,
        hook_config: L2pConfig,
        **kwargs,
    ):
        # Initialize the base hook first (stores module, original forward, ...).
        super().__init__(module=module, hook_config=hook_config, **kwargs)

        self.hook_config: L2pConfig = hook_config
        self._hook_type: HookType = HookType.FORWARD_REPLACE
        # Prompt pool layout.
        self._shared_prompt_pool = hook_config.shared_prompt_pool
        self.prompt_length = hook_config.prompts_per_hook
        self.prompt_dim = hook_config.prompt_dim
        # Learnable prompt tokens, allocated on the wrapped module's device.
        self.prompts = nn.Parameter(
            torch.zeros(
                (self.prompt_length, self.prompt_dim),
                dtype=torch.float32,
                device=self.module.weight.device,
            )
        )
        self.weights_initializer = self.hook_config.weights_initializer
        self._init_weights()
        # Optional dropout applied to the batch-expanded prompts.
        self.dropout = (
            nn.Dropout(self.hook_config.dropout)
            if self.hook_config.dropout > 0.0
            else None
        )

    def _init_weights(self) -> None:
        """Initialize the prompt parameters with the configured initializer.

        Raises:
            ValueError: If ``weights_initializer`` names an unsupported
                initialization scheme.
        """
        if self.weights_initializer == "kaiming_uniform_":
            nn.init.kaiming_uniform_(self.prompts, a=math.sqrt(5))
        elif self.weights_initializer == "xavier_uniform_":
            nn.init.xavier_uniform_(self.prompts)
        elif self.weights_initializer == "normal_":
            nn.init.normal_(self.prompts, mean=0.0, std=1.0)
        else:
            raise ValueError(f"Unknown weights initializer: {self.weights_initializer}")

    @handle_peft_errors(error_type="forward", collect_context=True)
    @validate_tensor_operation("L2p forward", expected_dims=None)
    def _forward(self, x, *args, **kwargs) -> torch.Tensor:
        """Return the prompt tokens expanded to the batch size of ``x``.

        Args:
            x: Input tensor whose leading dimension is the batch size.

        Returns:
            torch.Tensor: Prompts of shape
                ``(batch_size, prompt_length, prompt_dim)``.
        """
        batch_size = x.shape[0]
        # Broadcast the shared prompts across the batch dimension.
        output = self.prompts.unsqueeze(0).expand(batch_size, -1, -1)
        if self.dropout is not None:
            output = self.dropout(output)
        return output

    @handle_peft_errors(error_type="forward", collect_context=True)
    def _hook_forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        """
        Prepend prompt tokens to the input and call the original forward.

        The batch-expanded prompts are concatenated to the input along the
        sequence dimension, the wrapped module's original forward runs on
        the prompted sequence, and the prompt positions are removed from
        the output.

        Args:
            x: Input tensor, either ``(batch, seq_len, hidden_dim)`` or
                ``(batch, hidden_dim)``.
            *args, **kwargs: Additional arguments for the original layer.

        Returns:
            torch.Tensor: Output of the original layer with the prompt
                positions stripped.

        Raises:
            ForwardPassError: If the hook is not properly attached, or the
                input/output dimensionality is unsupported, or the input's
                last dimension does not match ``prompt_dim``.
        """
        if not self.is_attached:
            logger.debug("L2p hook is not active, passing through to original forward")
            if self._original_forward is None:
                raise ForwardPassError(
                    "Hook not properly attached - original forward method not available",
                    adapter_type="L2p",
                )
            return self._original_forward(x, *args, **kwargs)

        if self._original_forward is None:
            raise ForwardPassError(
                "Hook not properly attached - original forward method not available",
                adapter_type="L2p",
            ).add_context(
                module_type=type(self.module).__name__,
                module_path=getattr(self.module, "_module_path", "unknown"),
            )

        # _forward already expands the prompts across the batch, yielding
        # (batch_size, prompt_length, prompt_dim). Expanding a second time
        # here (as a previous revision did) would attempt to expand a 3D
        # tensor with 3 more sizes and fail at runtime.
        expanded_prompts = self._forward(x)

        if x.dim() == 3:
            # Token-level input (e.g., transformer): (batch, seq, hidden).
            if x.shape[-1] != self.prompt_dim:
                raise ForwardPassError(
                    f"Dimension mismatch: input dim {x.shape[-1]} != prompt dim {self.prompt_dim}",
                    adapter_type="L2P",
                )
            # Concatenate along the sequence dimension.
            prompted_input = torch.cat([expanded_prompts, x], dim=1)
        elif x.dim() == 2:
            # Feature-level input (e.g., MLP): add a length-1 sequence axis
            # so the prompts can be concatenated in front of it.
            if x.shape[-1] != self.prompt_dim:
                raise ForwardPassError(
                    f"Dimension mismatch: input dim {x.shape[-1]} != prompt dim {self.prompt_dim}",
                    adapter_type="L2P",
                )
            x_reshaped = x.unsqueeze(1)  # (batch_size, 1, hidden_dim)
            prompted_input = torch.cat([expanded_prompts, x_reshaped], dim=1)
        else:
            raise ForwardPassError(
                f"Unsupported input dimension: {x.dim()}. Expected 2D or 3D tensor.",
                adapter_type="L2P",
            )

        # Pass the prompted sequence through the original forward.
        prompted_output = self._original_forward(prompted_input, *args, **kwargs)

        # Remove the prompt tokens from the output.
        if prompted_output.dim() == 3:
            # Drop the first prompt_length positions.
            output = prompted_output[:, self.prompt_length :, :]
            # If the original input was 2D, squeeze the sequence axis back.
            if x.dim() == 2:
                output = output.squeeze(1)
        elif prompted_output.dim() == 2:
            # For 2D outputs (e.g., a pooled/classification head) there is
            # no per-token axis to strip; the prompt tokens have already
            # influenced the computation.
            output = prompted_output
        else:
            raise ForwardPassError(
                f"Unsupported output dimension: {prompted_output.dim()}",
                adapter_type="L2P",
            )

        return output


# FIXME: handle the shared_prompt_pool=True case
@HOOK_REGISTRY.register("l2p")
class L2pTaskAwareHook(BaseTaskAwareHook[L2pLayerHook]):
    """
    Task-aware hook for L2p that applies low-rank updates to linear layers.

    This hook extends the L2pLayerHook to support task-specific adaptations,
    allowing different tasks to have their own low-rank matrices.

    Args:
        module: Linear module to adapt
        config: L2p configuration
        task_id: Identifier for the current task
    """

    _hook_func = L2pLayerHook

    @handle_peft_errors(error_type="hook", collect_context=True)
    def __init__(
        self,
        router: RouterInterface,
        combiner: CombinerInterface,
        module: nn.Module,
        hook_config: Optional[L2pConfig] = None,
    ):
        super().__init__(
            router=router, combiner=combiner, module=module, hook_config=hook_config
        )

    @handle_peft_errors(error_type="forward", collect_context=True)
    def _hook_forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        """
        Apply L2p transformation and call original forward method.

        This implements W' = W + BA scaling, where:
        - W is the original weight matrix
        - B and A are low-rank matrices

        Args:
            x: Input tensor
            *args, **kwargs: Additional arguments for the original layer

        Returns:
            torch.Tensor: Transformed output tensor directly

        Raises:
            RuntimeError: If the hook is not properly attached
        """
        if not self.is_attached:
            logger.debug("L2p hook is not active, passing through to original forward")
            if self._original_forward is None:
                raise ForwardPassError(
                    "Hook not properly attached - original forward method not available",
                    adapter_type="L2p",
                )
            return self._original_forward(x, *args, **kwargs)

        if self._original_forward is None:
            raise ForwardPassError(
                "Hook not properly attached - original forward method not available",
                adapter_type="L2p",
            ).add_context(
                module_type=type(self.module).__name__,
                module_path=getattr(self.module, "_module_path", "unknown"),
            )

        context_task_ids = get_current_task_ids()
        bk_feature = get_backbone_feature(identifier=-1)
        l2p_prompts = self.forward(
            x, task_ids=context_task_ids, bk_feature=bk_feature, *args, **kwargs
        )
        if x.shape[-1] != self.prompt_dim:
            raise ForwardPassError(
                f"Dimension mismatch: input dim {x.shape[-1]} != prompt dim {self.prompt_dim}",
                adapter_type="L2P",
            )
        # Concatenate along sequence dimension
        prompted_input = torch.cat([l2p_prompts, x], dim=1)
        # Pass through original forward
        prompted_output = self._original_forward(prompted_input, *args, **kwargs)

        # Remove prompt tokens from output
        output = prompted_output[:, l2p_prompts.size(1) :, :]

        return output

    def process_with_task(
        self, x: torch.Tensor, task_id: int, *args, **kwargs
    ) -> torch.Tensor:
        """Process input with a specific task."""
        bk_feature = kwargs.get("bk_feature", None)
        assert bk_feature is not None, "Backbone feature must be provided"

        if str(task_id) not in self._hooks:
            raise ValueError(f"Task {task_id} is not registered")
        task_ids = [task_id]
        weights = self.router.route(x, task_ids=task_ids, *args, **kwargs)

        # Compute outputs only for specified tasks
        o_hook = {}
        for task_id in sorted(task_ids):
            task_key = str(task_id)
            if task_key in self._hooks:
                o_hook[task_id] = self._hooks[task_key].forward(x, *args, **kwargs)

        # Combine outputs with weights
        o = self.combiner.combine(o_hook, weights, *args, **kwargs)
        return o["combiner_output"]

    def process_with_tasks(
        self, x: torch.Tensor, task_ids: List[int], *args, **kwargs
    ) -> torch.Tensor:
        """Process input among multiple tasks."""
        # Get routing weights for specified tasks
        weights = self.router.route(x, task_ids=task_ids, *args, **kwargs)

        # Compute outputs only for specified tasks
        o_hook = {}
        for task_id in sorted(task_ids):
            task_key = str(task_id)
            if task_key in self._hooks:
                o_hook[task_id] = self._hooks[task_key].forward(x, *args, **kwargs)

        # Combine outputs with weights
        o = self.combiner.combine(o_hook, weights, *args, **kwargs)
        return o["combiner_output"]


@HOOK_FACTORY_REGISTRY.register("l2p")
class L2pHookFactory(BaseHookFactory[L2pTaskAwareHook, L2pConfig]):
    """
    Factory for creating L2p hooks.

    This implements the HookFactoryInterface interface for creating L2p-specific hooks,
    following the Factory Pattern for extensible hook creation.

    Args:
        **kwargs: Configuration parameters for L2p
    """

    @handle_peft_errors(error_type="config")
    def __init__(
        self,
        hook_name: str = "l2p",
        router_name: str = "task_inter_router",
        combiner_name: str = "concatenation",
        hook_config: Union[Dict[str, Any], L2pConfig] = L2pConfig(),
        router_config: Dict[str, Any] = {
            "num_experts": 1,
            "embed_dim": 768,
            "routing_strategy": "top_k",
            "weights_initializer": "kaiming_uniform_",
            "top_k": 10,
            "temperature": 1.0,
            "noise_std": 0.0,
            "load_balancing_loss_weight": -1.0,
            "track_routing_history": False,
        },
        combiner_config: Dict[str, Any] = {"topk": 10},
    ):
        super().__init__(
            hook_name=hook_name,
            router_name=router_name,
            combiner_name=combiner_name,
            hook_config=hook_config,
            router_config=router_config,
            combiner_config=combiner_config,
        )

    def create_hook_instance(
        self,
        module: nn.Module,
        router: RouterInterface,
        combiner: CombinerInterface,
        config: L2pConfig,
    ) -> "L2pTaskAwareHook":
        """
        Create a L2p hook instance for a specific module.

        Args:
            module: Module to apply L2p to
            router: Router instance for task-aware routing
            combiner: Combiner instance for output combination
            config: L2p configuration

        Returns:
            L2pTaskAwareHook: Task-aware L2p hook instance

        Raises:
            HookAttachError: If module is not compatible with L2p
        """
        # Ensure config is L2pConfig
        if not isinstance(config, L2pConfig):
            logger.warning(
                f"Expected L2pConfig but got {type(config).__name__}, converting..."
            )
            config = create_peft_config("l2p", config.to_dict())

        # Create and return task-aware hook
        try:
            hook = L2pTaskAwareHook(
                router=router,
                combiner=combiner,
                module=module,
                hook_config=config,
            )
            logger.debug("Successfully created L2p task-aware hook")
            return hook
        except Exception as e:
            logger.error(f"Failed to create L2p hook: {e}")
            raise HookAttachError(f"Failed to create L2p hook: {str(e)}") from e

    def __repr__(self) -> str:
        """String representation of the factory.

        Returns a comprehensive string showing all key L2P configuration parameters,
        making it easy to understand the factory's configuration at a glance.
        """
        # Basic L2P configuration
        config_parts = [
            f"hook_name='{self.hook_name}'",
            f"prompts_per_hook={self.hook_config.prompts_per_hook}",
            f"prompt_dim={self.hook_config.prompt_dim}",
        ]

        # Optional parameters (only show if not default)
        if self.hook_config.shared_prompt_pool:
            config_parts.append("shared_prompt_pool=True")

        if self.hook_config.weights_initializer != "kaiming_uniform_":
            config_parts.append(
                f"weights_initializer='{self.hook_config.weights_initializer}'"
            )

        if self.hook_config.dropout > 0:
            config_parts.append(f"dropout={self.hook_config.dropout}")

        # Router and combiner information
        config_parts.extend(
            [
                f"router='{self.router_name}'",
                f"combiner='{self.combiner_name}'",
            ]
        )

        # Add router configuration highlights
        if hasattr(self, "router_config"):
            router_strategy = self.router_config.get("routing_strategy", "unknown")
            config_parts.append(f"routing_strategy='{router_strategy}'")

            if router_strategy == "top_k":
                top_k = self.router_config.get("top_k", -1)
                config_parts.append(f"top_k={top_k}")

        return f"{self.__class__.__name__}({', '.join(config_parts)})"
