"""
LoRA-specific hooks for parameter-efficient fine-tuning.

This module provides hooks specifically designed for LoRA (Low-Rank Adaptation)
that apply low-rank updates to linear transformations in neural networks.
"""

import math
from typing import Any, Dict, Optional, Union

import torch
import torch.nn as nn
import torch.nn.functional as F

from continuallearning.interfaces import (
    HookType,
    CombinerInterface,
    RouterInterface,
)
from continuallearning.models.pefts.common.config.config import LoRAConfig
from continuallearning.models.pefts.common.config.config_factory import (
    create_peft_config,
)
from continuallearning.models.pefts.common.utils.error_handlers import (
    handle_peft_errors,
    validate_tensor_operation,
)
from continuallearning.models.pefts.common.utils.exceptions import (
    ForwardPassError,
    HookAttachError,
)
from continuallearning.models.pefts.common.utils.utils import is_linear_module
from continuallearning.models.pefts.hooks._base_hook import (
    BaseHook,
    BaseTaskAwareHook,
    BaseHookFactory,
)
from continuallearning.models.pefts.common.context import (
    get_current_task_ids,
)
from continuallearning.registry import (
    HOOK_REGISTRY,
    HOOK_FACTORY_REGISTRY,
)
from continuallearning.utils.logging import get_logger

# from continuallearning.models.pefts.hook_managers.hook_manager import UnifiedHookManager

# Create the module logger via the project's custom logging system
logger = get_logger(__name__)


class LoRALayerHook(BaseHook[LoRAConfig]):
    """
    Hook for applying LoRA (Low-Rank Adaptation) to linear layers.

    LoRA augments a frozen weight matrix W with a trainable low-rank update,
    computing ``W x + scaling * B (A x)`` where A and B are low-rank matrices.
    This hook adds such trainable low-rank adapters to any linear
    transformation module.

    Args:
        module: Linear module to adapt (must satisfy ``is_linear_module``).
        hook_config: LoRA configuration (rank, scaling, dropout, feature
            shapes, weight initializer).

    Raises:
        HookAttachError: If ``module`` is not a linear module.
    """

    @handle_peft_errors(error_type="hook", collect_context=True)
    def __init__(
        self,
        *,
        module: nn.Module,
        hook_config: LoRAConfig,
        **kwargs,
    ):
        # Let the base class register the module and common hook state first.
        super().__init__(module=module, hook_config=hook_config, **kwargs)

        self.hook_config = hook_config
        self._hook_type: HookType = HookType.FORWARD_REPLACE
        # LoRA only makes sense on linear transformations; fail fast otherwise.
        if not is_linear_module(module):
            raise HookAttachError(
                f"Module {type(module).__name__} is not a linear module, cannot apply LoRA"
            )

        # Shapes of the adapted weight matrix come from the config.
        self.in_features = self.hook_config.in_features
        self.out_features = self.hook_config.out_features
        # Low-rank factors: B (out x rank) @ A (rank x in) forms the update.
        self.lora_A = nn.Parameter(torch.zeros(self.hook_config.rank, self.in_features))
        self.lora_B = nn.Parameter(
            torch.zeros(self.out_features, self.hook_config.rank)
        )
        self.scaling = self.hook_config.scaling
        # Optional dropout applied only to the input of the LoRA branch.
        self.dropout = (
            nn.Dropout(self.hook_config.dropout)
            if self.hook_config.dropout > 0.0
            else None
        )
        self.weights_initializer = self.hook_config.weights_initializer
        self._init_weights()

    def _init_weights(
        self,
    ) -> None:
        """Initialize the low-rank LoRA matrices.

        ``lora_A`` is initialized according to ``self.weights_initializer``
        (one of ``"kaiming_uniform_"``, ``"xavier_uniform_"`` or
        ``"normal_"``), while ``lora_B`` is zero-initialized so the adapter
        contributes nothing at step 0 (B @ A == 0), preserving the base
        model's initial behavior.

        Raises:
            ValueError: If ``self.weights_initializer`` is not one of the
                supported initializer names.
        """
        if self.weights_initializer == "kaiming_uniform_":
            nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))
        elif self.weights_initializer == "xavier_uniform_":
            nn.init.xavier_uniform_(self.lora_A)
        elif self.weights_initializer == "normal_":
            nn.init.normal_(self.lora_A, mean=0.0, std=1.0)
        else:
            raise ValueError(f"Unknown weights initializer: {self.weights_initializer}")

        nn.init.zeros_(self.lora_B)

    @handle_peft_errors(error_type="forward", collect_context=True)
    @validate_tensor_operation("LoRA forward", expected_dims=None)
    def _forward(self, x, *args, **kwargs) -> torch.Tensor:
        """
        Compute the LoRA branch output for the input tensor.

        Args:
            x: Input tensor of the wrapped linear module.
            *args, **kwargs: Accepted for signature compatibility; unused.

        Returns:
            torch.Tensor: ``scaling * (B @ A) x``, i.e. the low-rank update
            to be added to the original module's output.
        """
        # Dropout (if configured) is applied to the LoRA branch input only,
        # not to the original module's path.
        if self.dropout is not None:
            x = self.dropout(x)
        lora_output = F.linear(x, self.lora_B @ self.lora_A) * self.scaling
        return lora_output

    @handle_peft_errors(error_type="forward", collect_context=True)
    def _hook_forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        """
        Apply LoRA transformation and call the original forward method.

        Implements ``W' x = W x + scaling * (B @ A) x``, where:
        - W is the original weight matrix
        - B and A are the trainable low-rank matrices

        Args:
            x: Input tensor
            *args, **kwargs: Additional arguments for the original layer

        Returns:
            torch.Tensor: Original output plus the LoRA update.

        Raises:
            ForwardPassError: If the hook is not properly attached (no
                original forward method captured).
        """
        if not self.is_attached or self._original_forward is None:
            raise ForwardPassError(
                "Hook not properly attached",
                adapter_type="LoRA",
            ).add_context(
                module_type=type(self.module).__name__,
                module_path=getattr(self.module, "_module_path", "unknown"),
            )

        original_output = self._original_forward(x, *args, **kwargs)
        # Apply LoRA transformation and combine with the base output.
        lora_output = self._forward(x)
        combined_output = original_output + lora_output

        return combined_output


@HOOK_REGISTRY.register("lora")
class LoRATaskAwareHook(BaseTaskAwareHook[LoRALayerHook]):
    """
    Task-aware hook for LoRA that applies low-rank updates to linear layers.

    This hook extends ``LoRALayerHook`` to support task-specific adaptations:
    a router selects among per-task low-rank matrices and a combiner merges
    their outputs.

    Args:
        router: Router used to dispatch inputs to task-specific adapters.
        combiner: Combiner used to merge the adapters' outputs.
        module: Linear module to adapt.
        hook_config: Optional LoRA configuration; forwarded to the base class.
    """

    # Per-task hook implementation instantiated by the base class.
    _hook_func = LoRALayerHook

    @handle_peft_errors(error_type="hook", collect_context=True)
    def __init__(
        self,
        router: RouterInterface,
        combiner: CombinerInterface,
        module: nn.Module,
        hook_config: Optional[LoRAConfig] = None,
    ):
        super().__init__(
            router=router, combiner=combiner, module=module, hook_config=hook_config
        )

    @handle_peft_errors(error_type="forward", collect_context=True)
    def _hook_forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        """
        Apply task-aware LoRA transformation and call the original forward.

        Implements ``W' x = W x + scaling * (B @ A) x``, where:
        - W is the original weight matrix
        - B and A are the (task-selected) low-rank matrices

        Args:
            x: Input tensor
            *args, **kwargs: Additional arguments for the original layer

        Returns:
            torch.Tensor: Original output plus the task-aware LoRA update.

        Raises:
            ForwardPassError: If the hook is not properly attached (no
                original forward method captured).
        """
        if not self.is_attached or self._original_forward is None:
            raise ForwardPassError(
                "Hook not properly attached",
                adapter_type="LoRA",
            ).add_context(
                module_type=type(self.module).__name__,
                module_path=getattr(self.module, "_module_path", "unknown"),
            )

        # Task ids come from the ambient context (set by the training loop),
        # not from the call arguments.
        context_task_ids = get_current_task_ids()
        original_output = self._original_forward(x, *args, **kwargs)
        # Equivalent to the original call but with positional args before the
        # keyword, the conventional call ordering.
        lora_output = self.forward(x, *args, task_ids=context_task_ids, **kwargs)

        # Combine outputs
        combined_output = original_output + lora_output

        return combined_output


@HOOK_FACTORY_REGISTRY.register("lora")
class LoRAHookFactory(BaseHookFactory[LoRATaskAwareHook, LoRAConfig]):
    """
    Factory for creating LoRA hooks.

    This implements the HookFactoryInterface interface for creating
    LoRA-specific hooks, following the Factory Pattern for extensible hook
    creation.

    Args:
        hook_name: Registry name of the hook to create.
        router_name: Registry name of the router implementation.
        combiner_name: Registry name of the combiner implementation.
        hook_config: LoRA configuration (dict or ``LoRAConfig``); defaults to
            a fresh ``LoRAConfig()`` per instance.
        router_config: Router configuration; defaults documented below.
        combiner_config: Combiner configuration; defaults to an empty dict.
    """

    @handle_peft_errors(error_type="config")
    def __init__(
        self,
        hook_name: str = "lora",
        router_name: str = "task_inter_router",
        combiner_name: str = "task_inter_weightedsum",
        hook_config: Optional[Union[Dict[str, Any], LoRAConfig]] = None,
        router_config: Optional[Dict[str, Any]] = None,
        combiner_config: Optional[Dict[str, Any]] = None,
    ):
        # Build defaults per call instead of using mutable default arguments:
        # default dicts/objects in the signature would be shared across every
        # factory instance and silently propagate in-place mutations.
        if hook_config is None:
            hook_config = LoRAConfig()
        if router_config is None:
            router_config = {
                "num_experts": 1,
                "embed_dim": 768,
                "routing_strategy": "soft",
                "weights_initializer": "kaiming_uniform_",
                "top_k": -1,
                "temperature": 1.0,
                "noise_std": 0.0,
                "load_balancing_loss_weight": -1.0,
                "track_routing_history": False,
            }
        if combiner_config is None:
            combiner_config = {}
        super().__init__(
            hook_name=hook_name,
            router_name=router_name,
            combiner_name=combiner_name,
            hook_config=hook_config,
            router_config=router_config,
            combiner_config=combiner_config,
        )

    def create_hook_instance(
        self,
        module: nn.Module,
        router: RouterInterface,
        combiner: CombinerInterface,
        config: LoRAConfig,
    ) -> "LoRATaskAwareHook":
        """
        Create a LoRA hook instance for a specific module.

        Args:
            module: Module to apply LoRA to
            router: Router instance for task-aware routing
            combiner: Combiner instance for output combination
            config: LoRA configuration

        Returns:
            LoRATaskAwareHook: Task-aware LoRA hook instance

        Raises:
            HookAttachError: If module is not compatible with LoRA, or if
                hook construction fails.
        """
        # Validate module compatibility
        if not is_linear_module(module):
            error_msg = (
                f"Module type {type(module).__name__} is not compatible with LoRA. "
                f"LoRA can only be applied to linear modules."
            )
            logger.error(error_msg)
            raise HookAttachError(error_msg)

        # Log module information for debugging. nn.Linear weights are stored
        # as (out_features, in_features).
        if hasattr(module, "weight"):
            in_features = module.weight.shape[1]
            out_features = module.weight.shape[0]
            logger.debug(
                f"Creating LoRA hook for linear module: "
                f"in_features={in_features}, out_features={out_features}, "
                f"rank={config.rank}"
            )

        # Coerce plain configs into LoRAConfig so downstream code can rely on
        # its attributes (rank, scaling, ...).
        if not isinstance(config, LoRAConfig):
            logger.warning(
                f"Expected LoRAConfig but got {type(config).__name__}, converting..."
            )
            config = create_peft_config("lora", config.to_dict())

        # Create and return task-aware hook
        try:
            hook = LoRATaskAwareHook(
                router=router,
                combiner=combiner,
                module=module,
                hook_config=config,
            )
            logger.debug("Successfully created LoRA task-aware hook")
            return hook
        except Exception as e:
            logger.error(f"Failed to create LoRA hook: {e}")
            raise HookAttachError(f"Failed to create LoRA hook: {str(e)}") from e

    def __repr__(self) -> str:
        """String representation of the factory."""
        return (
            f"{self.__class__.__name__}("
            f"rank={self.hook_config.rank}, "
            f"alpha={self.hook_config.alpha}, "
            f"dropout={self.hook_config.dropout}, "
            f"router={self.router_cls.__name__ if self.router_cls else 'None'}, "
            f"combiner={self.combiner_cls.__name__ if self.combiner_cls else 'None'})"
        )
