"""
Configuration classes for parameter-efficient fine-tuning.

This module provides a set of standardized configuration classes for different
PEFT methods, ensuring consistent parameter handling across the system.
"""

from dataclasses import dataclass, field, asdict
from typing import (
    Dict,
    List,
    Any,
    Type,
    TypeVar,
)

T = TypeVar("T", bound="BasePEFTConfig")


@dataclass
class BasePEFTConfig:
    """
    Base configuration class for all PEFT methods.

    Provides the parameters and dict (de)serialization helpers shared by
    every PEFT configuration.

    Args:
        peft_type: Identifier of the PEFT method ("base" for this class;
            subclasses override it).
        weights_initializer: Name of a ``torch.nn.init`` function used to
            initialize adapter weights.
    """

    peft_type: str = "base"
    weights_initializer: str = "kaiming_uniform_"

    def __post_init__(self):
        """Validate that ``weights_initializer`` names a torch.nn.init function.

        Raises:
            ValueError: If ``weights_initializer`` is not an attribute of
                ``torch.nn.init``.
        """
        # Imported lazily so merely importing this module does not require
        # torch; the dependency is only paid when a config is instantiated.
        import torch.nn.init as init

        if not hasattr(init, self.weights_initializer):
            # Collect all public callables so the error message is actionable.
            available_methods = [
                attr
                for attr in dir(init)
                if callable(getattr(init, attr)) and not attr.startswith("_")
            ]
            raise ValueError(
                f"Invalid weights_initializer: '{self.weights_initializer}'. "
                f"Must be a valid torch.nn.init method. "
                f"Available methods: {', '.join(sorted(available_methods))}"
            )

    def to_dict(self) -> Dict[str, Any]:
        """Convert configuration to a dictionary (includes ``init=False`` fields)."""
        return asdict(self)

    @classmethod
    def from_dict(cls: Type[T], config_dict: Dict[str, Any]) -> T:
        """
        Create a configuration from a dictionary.

        Unknown keys are silently dropped. Fields declared with
        ``init=False`` (computed in ``__post_init__``, e.g. LoRA's
        ``scaling``) are also excluded, so ``from_dict(cfg.to_dict())``
        round-trips instead of raising ``TypeError: unexpected keyword``.

        Args:
            config_dict: Dictionary containing configuration parameters.

        Returns:
            Instance of the configuration class.
        """
        # Only keys that map to constructor-accepted (init=True) dataclass
        # fields may be forwarded to cls(**...).
        init_fields = {
            name for name, f in cls.__dataclass_fields__.items() if f.init
        }
        filtered = {k: v for k, v in config_dict.items() if k in init_fields}
        return cls(**filtered)


@dataclass
class LoRAConfig(BasePEFTConfig):
    """
    Configuration for LoRA (Low-Rank Adaptation).

    LoRA is a technique that freezes the pre-trained model weights and injects
    trainable rank decomposition matrices into transformer layers, greatly
    reducing the number of trainable parameters for downstream tasks.

    References:
    - "LoRA: Low-Rank Adaptation of Large Language Models" (Hu et al., 2021)
      https://arxiv.org/abs/2106.09685

    Args:
        peft_type: Type identifier for PEFT method ("lora").
        rank: Rank of the update matrices (r << d). Must be positive.
        alpha: Scaling factor for LoRA parameterization. Must be positive.
        dropout: Dropout probability for LoRA layers, in [0, 1].
        bias: How to handle bias parameters: "none", "all", or "lora_only".
        use_rslora: Whether to use the rank-stabilized LoRA (RSLoRA) variant.
        in_features: Input dimension of the adapted projection.
        out_features: Output dimension of the adapted projection.

    Attributes set automatically:
        scaling: alpha / rank, computed in ``__post_init__`` (not a
            constructor argument).

    Example:
        ```python
        config = LoRAConfig(
            rank=16,
            alpha=32,
            dropout=0.1,
        )
        ```
    """

    peft_type: str = "lora"
    rank: int = 8
    alpha: float = 32.0
    dropout: float = 0.0
    bias: str = "none"  # Options: "none", "all", "lora_only"
    use_rslora: bool = False
    in_features: int = 768 * 4
    out_features: int = 768
    # Automatically computed field (see __post_init__); excluded from __init__.
    scaling: float = field(init=False)

    def __post_init__(self):
        """Validate configuration parameters, then compute the scaling factor.

        Raises:
            ValueError: If rank/alpha are not positive, dropout is outside
                [0, 1], or bias is not one of the supported options.
        """
        super().__post_init__()

        # Validate *before* computing scaling so that rank=0 raises the
        # intended ValueError rather than a ZeroDivisionError.
        if self.rank <= 0:
            raise ValueError(f"Invalid rank: {self.rank}. Must be positive.")

        if self.alpha <= 0:
            raise ValueError(f"Invalid alpha: {self.alpha}. Must be positive.")

        if self.dropout < 0 or self.dropout > 1:
            raise ValueError(
                f"Invalid dropout: {self.dropout}. Must be between 0 and 1."
            )

        if self.bias not in ["none", "all", "lora_only"]:
            raise ValueError(
                f"Invalid bias option: {self.bias}. Must be one of: none, all, lora_only."
            )

        # NOTE(review): scaling is alpha/rank even when use_rslora is True;
        # RSLoRA conventionally uses alpha/sqrt(rank) — confirm whether the
        # consumer of this config applies that adjustment elsewhere.
        self.scaling = self.alpha / self.rank


@dataclass
class BypassLoRAConfig(LoRAConfig):
    """
    Configuration for Bypass LoRA adaptation.

    Bypass LoRA extends the standard LoRA approach by adding trainable
    low-rank bypass connections between intermediate and output layers in
    transformer blocks, enabling more powerful adaptation while maintaining
    parameter efficiency.

    This class inherits every field from LoRAConfig (rank, alpha, dropout,
    bias, use_rslora, ...) and only overrides:

    Args:
        peft_type: Type identifier for PEFT method, fixed to "bypass_lora".
        in_features: Input dimension of the adapted projection; defaults to
            768 here (vs. 768 * 4 in LoRAConfig).
        out_features: Output dimension of the adapted projection (768).

    NOTE(review): earlier documentation described additional parameters
    (intermediate_modules, output_modules, bypass_weight, use_layer_norm,
    shared_rank, use_balancing) that are NOT defined on this class —
    constructing it with any of them raises TypeError. They are presumably
    handled by the consuming module or were never implemented; confirm
    before relying on them.

    Example:
        ```python
        config = BypassLoRAConfig(rank=4, alpha=8.0)
        ```
    """

    peft_type: str = "bypass_lora"
    in_features: int = 768
    out_features: int = 768


@dataclass
class PromptConfig(BasePEFTConfig):
    """
    Configuration for prompt tuning methods.

    Prompt tuning adds learnable prompt tokens to the input sequence of a
    model. This is the base configuration for all prompt-based methods.

    References:
    - "The Power of Scale for Parameter-Efficient Prompt Tuning" (Lester et al., 2021)

    Args:
        peft_type: Type identifier for PEFT method ("prompt").
        prompt_length: Number of prompt tokens. Must be positive.
        prompt_dim: Dimension of prompt embeddings. Must be positive.
        dropout: Dropout probability for prompt embeddings, in [0, 1).

    Example:
        ```python
        config = PromptConfig(
            prompt_length=20,
            prompt_dim=768,
            dropout=0.1
        )
        ```
    """

    peft_type: str = "prompt"
    prompt_length: int = 10
    prompt_dim: int = 768
    dropout: float = 0.0

    def __post_init__(self):
        """Validate prompt configuration parameters.

        Raises:
            ValueError: If prompt_length or prompt_dim is not positive, or
                dropout falls outside [0, 1).
        """
        # Fix: this docstring previously appeared *after* the super() call,
        # making it a dead string expression instead of a docstring.
        super().__post_init__()

        if self.prompt_length <= 0:
            raise ValueError(
                f"prompt_length must be positive, got {self.prompt_length}"
            )

        if self.prompt_dim <= 0:
            raise ValueError(f"prompt_dim must be positive, got {self.prompt_dim}")

        # NOTE(review): this accepts dropout in [0, 1) while LoRAConfig
        # accepts [0, 1]; kept as-is to preserve behavior — confirm intent.
        if not 0 <= self.dropout < 1:
            raise ValueError(f"dropout must be in [0, 1), got {self.dropout}")


@dataclass
class L2pConfig(PromptConfig):
    """
    Configuration for Learning to Prompt (L2P) method.

    L2P maintains a learnable prompt pool and uses a query mechanism to select
    the most relevant prompts for each input. This enables instance-specific
    prompt selection for continual learning without task boundaries.

    Key Components:
    - Prompt Pool: A set of learnable prompt vectors
    - Prompt Keys: Learnable keys associated with each prompt for selection
    - Query Function: Maps input features to query vectors for prompt retrieval

    References:
    - "Learning to Prompt for Continual Learning" (Wang et al., CVPR 2022)
      https://arxiv.org/abs/2112.08654

    Args:
        pool_size: Number of prompts in the pool. Must be positive.
        top_k: Number of prompts to select for each input. Must not exceed
            pool_size (shared mode) or prompt_length (individual mode).
        use_prompt_mask: Whether to use attention mask for selected prompts.
        shared_prompt_pool: Whether to share prompt pool across hooks.
                           If True, each hook uses pool_size prompts.
                           If False, each hook uses prompt_length prompts.

    Example:
        ```python
        # Shared pool mode: each hook gets access to all pool_size prompts
        config = L2pConfig(
            prompt_length=5,     # Length of each prompt token sequence
            prompt_dim=768,      # Dimension matching model hidden size
            pool_size=100,       # Total number of prompts in pool
            top_k=5,             # Select 5 prompts per input
            shared_prompt_pool=True,  # Share the entire pool
            dropout=0.0
        )

        # Individual mode: each hook gets its own prompt_length prompts
        config = L2pConfig(
            prompt_length=20,    # Each hook gets 20 prompts
            prompt_dim=768,
            pool_size=100,       # Ignored in this mode
            top_k=5,
            shared_prompt_pool=False,  # Each hook has independent prompts
            dropout=0.0
        )
        ```
    """

    peft_type: str = "l2p"

    # Prompt pool configuration
    pool_size: int = 100
    top_k: int = 5
    shared_prompt_pool: bool = True
    # FIXME: unclear whether this flag is actually consumed anywhere — verify
    # against the hook implementation before removing or relying on it.
    use_prompt_mask: bool = True

    # Computed field for actual prompts per hook (set in __post_init__,
    # excluded from __init__).
    prompts_per_hook: int = field(init=False)

    def __post_init__(self):
        """Validate L2P-specific configuration and set prompts_per_hook.

        Raises:
            ValueError: If pool_size is not positive, or top_k exceeds the
                effective pool size for the selected mode.
        """
        super().__post_init__()

        if self.pool_size <= 0:
            raise ValueError(f"pool_size must be positive, got {self.pool_size}")

        # Set prompts_per_hook based on shared_prompt_pool mode
        if self.shared_prompt_pool:
            # Shared mode: each hook uses the entire pool
            self.prompts_per_hook = self.pool_size

            if self.top_k > self.pool_size:
                raise ValueError(
                    f"In shared pool mode, top_k ({self.top_k}) cannot exceed "
                    f"pool_size ({self.pool_size})"
                )
        else:
            # Individual mode: each hook uses prompt_length prompts
            self.prompts_per_hook = self.prompt_length

            if self.top_k > self.prompt_length:
                raise ValueError(
                    f"In individual pool mode, top_k ({self.top_k}) cannot exceed "
                    f"prompt_length ({self.prompt_length})"
                )

# Registry of configuration classes, keyed by their peft_type identifier.
# Used to resolve a peft_type string to its config class.
CONFIG_REGISTRY: Dict[str, Type[BasePEFTConfig]] = {
    "base": BasePEFTConfig,
    "lora": LoRAConfig,
    # Was missing: "bypass_lora" configs could never be resolved by type string.
    "bypass_lora": BypassLoRAConfig,
    "prompt": PromptConfig,
    "l2p": L2pConfig,
}
