"""
Low-Rank Adaptation (LoRA) implementation for continual learning.

This module implements the LoRA method from:
"LoRA: Low-Rank Adaptation of Large Language Models" (Hu et al., 2021)
using a hook-based approach for modular integration.
"""

from typing import Any, Dict, List, Optional, Union, cast

import torch
import torch.nn as nn

from continuallearning.interfaces.models.adapter import AdapterType
from continuallearning.models.backbones.base import BaseBackbone
from continuallearning.models.pefts.common.config import LoRAConfig
from continuallearning.models.pefts.common.config_factory import create_peft_config
from continuallearning.models.pefts.common.exceptions import ConfigurationError
from continuallearning.models.pefts.hooks._base_hook import BaseHookAdapter
from continuallearning.models.pefts.lora.hooks.lora_hooks import LoRAHookManager
from continuallearning.registry import HOOK_ADAPTER_REGISTRY
from continuallearning.utils.logging import get_logger

logger = get_logger(__name__)


@HOOK_ADAPTER_REGISTRY.register()
class HookBasedLoRAAdapter(BaseHookAdapter):
    """
    Hook-based implementation of Low-Rank Adaptation (LoRA).

    LoRA adapts models by injecting trainable low-rank matrices into
    linear transformations, allowing efficient adaptation with minimal
    added parameters.

    Args:
        backbone: Backbone model to adapt
        rank: Dimension of low-rank approximation
        alpha: Scaling factor
        dropout: Dropout probability for LoRA
        target_modules: List of module name patterns to apply LoRA to.
            Defaults to common projection-layer names
            ("query", "key", "value", "dense", "fc", "out_proj").
        task_specific: Whether to use separate weights per task
        num_tasks: Number of tasks (required if task_specific=True)
        adapter_type: Type of adapter (usually PRE_BACKBONE for LoRA)
        config: Optional configuration object or dictionary. If provided,
                this takes precedence over the individual parameters.
    """

    def __init__(
        self,
        backbone: BaseBackbone,
        rank: int = 8,
        alpha: float = 32.0,
        dropout: float = 0.0,
        target_modules: Optional[List[str]] = None,
        task_specific: bool = False,
        num_tasks: int = 10,
        adapter_type: AdapterType = AdapterType.PRE_BACKBONE,
        config: Optional[Union[Dict[str, Any], LoRAConfig]] = None,
    ):
        """
        Initialize a LoRA adapter.

        Args:
            backbone: Backbone model to adapt
            rank: Dimension of low-rank approximation (r in the LoRA paper)
            alpha: Scaling factor (alpha in the LoRA paper)
            dropout: Dropout probability for LoRA
            target_modules: List of module name patterns to apply LoRA to.
                If None, a default set of common projection names is used.
            task_specific: Whether to use separate weights per task
            num_tasks: Number of tasks (required if task_specific=True)
            adapter_type: Type of adapter (usually PRE_BACKBONE for LoRA)
            config: Optional configuration object or dictionary. If provided,
                    this takes precedence over the individual parameters.

        Raises:
            ConfigurationError: If the LoRA configuration cannot be built.
        """
        # Use a None sentinel and build a fresh list on every call instead of
        # a mutable default argument, which would be shared across all
        # instances and silently corrupted by any in-place mutation.
        if target_modules is None:
            target_modules = [
                "query",
                "key",
                "value",
                "dense",
                "fc",
                "out_proj",
            ]

        # Initialize the base hook adapter with task-specific settings
        super().__init__(
            backbone=backbone,
            adapter_type=adapter_type,
            task_specific=task_specific,
            num_tasks=num_tasks,
        )

        # Use unified config factory to handle all configuration scenarios
        try:
            # Process configuration, prioritizing explicit config object
            if config is not None:
                if isinstance(config, dict):
                    self.config = create_peft_config("lora", config)
                else:
                    self.config = config
            else:
                # Create configuration from individual parameters
                self.config = create_peft_config(
                    "lora",
                    {
                        "rank": rank,
                        "alpha": alpha,
                        "dropout": dropout,
                        "target_modules": target_modules,
                        "task_specific": task_specific,
                        "num_tasks": num_tasks,
                    },
                )

            # `typing.cast` is a static-typing assertion only — it performs no
            # runtime check or conversion. The original code guarded this with
            # `isinstance`, which implied validation that never happened; the
            # unconditional cast is runtime-identical and honest about intent.
            self.config = cast(LoRAConfig, self.config)

            # Lazy %-style args avoid formatting cost when DEBUG is disabled.
            logger.debug("LoRA adapter initialized with config: %s", self.config)

        except Exception as e:
            # Surface configuration failures with context, preserving the
            # original exception as the cause for debugging.
            raise ConfigurationError(
                f"Failed to initialize LoRA configuration: {str(e)}", method="lora"
            ) from e

    @property
    def lora_config(self) -> LoRAConfig:
        """
        Backward-compatible accessor kept for existing callers.

        Returns:
            LoRAConfig: The current configuration.
        """
        return self.config

    def adapt_representation(
        self,
        hidden_states: torch.Tensor,
        task_id: Optional[int] = None,
        layer_idx: Optional[int] = None,
        **kwargs,
    ) -> torch.Tensor:
        """
        Apply adaptation to model representations (hidden states).

        For LoRA adapters, this method differs from prompt-based adapters in
        that LoRA works by modifying the weights of linear layers through
        hooks rather than directly manipulating the hidden states. This
        implementation primarily serves as a compatibility layer for the
        unified adapter interface.

        Since LoRA's adaptations are applied automatically through the
        attached hooks during the forward pass of each linear layer, this
        method simply returns the input hidden states unchanged. The actual
        adaptation happens within the LoRALayerHook._hooked_forward method.

        Args:
            hidden_states: Input hidden states (passed through unchanged)
            task_id: Optional task identifier (used to prepare the correct task)
            layer_idx: Optional layer index (unused in LoRA)
            **kwargs: Additional arguments (unused in LoRA)

        Returns:
            torch.Tensor: The input hidden states (unchanged)
        """
        # Switch the active task first if the caller asked for a different one.
        if task_id is not None and task_id != self.current_task:
            self.prepare_task(task_id)

        # For LoRA, adaptation happens at the weight level through hooks,
        # not directly on hidden states, so we just return the input.
        return hidden_states

    def _create_hook_manager(self, model: nn.Module, task_id: int) -> LoRAHookManager:
        """
        Create a hook manager for a specific task.

        This centralizes hook manager creation logic and ensures consistent
        settings.

        Args:
            model: Model to apply hooks to
            task_id: Task identifier

        Returns:
            LoRAHookManager: Hook manager for the task
        """
        # Clone the base config but bind it to this task via task_id.
        # NOTE(review): task_specific / num_tasks are deliberately not copied
        # here — the per-task config is identified solely by task_id; confirm
        # LoRAHookManager does not need them.
        task_config = LoRAConfig(
            rank=self.config.rank,
            alpha=self.config.alpha,
            dropout=self.config.dropout,
            target_modules=self.config.target_modules,
            task_id=task_id,
        )

        # Create and return the hook manager for this task.
        return LoRAHookManager(model=model, hook_factory=None, config=task_config)
