"""
Utility functions for Parameter-Efficient Fine-Tuning (PEFT) configurations.

This module provides helper functions for parsing, validating, and handling
configuration objects for PEFT methods.
"""

from typing import Dict, Any, Optional, Union, Type, TypeVar
from dataclasses import is_dataclass, asdict

from .config import BasePEFTConfig
from continuallearning.models.pefts.common.utils.exceptions import ConfigurationError
from continuallearning.utils.logging import get_logger

logger = get_logger(__name__)

T = TypeVar("T", bound=BasePEFTConfig)


def validate_config(
    config: Union[Dict[str, Any], BasePEFTConfig], config_class: Type[T]
) -> T:
    """
    Validate a configuration object against a specific configuration class.

    Unknown keys in ``config`` are silently dropped; only fields declared on
    ``config_class`` are forwarded to its constructor.

    Args:
        config: Configuration dataclass instance or plain dictionary
        config_class: Target configuration class to validate against

    Returns:
        T: Validated configuration object of the specified class

    Raises:
        ConfigurationError: If ``config`` is neither a dataclass instance nor
            a dict, or if ``config_class`` cannot be constructed from the
            provided values (e.g. a required field is missing)
    """
    try:
        # Convert to a plain dictionary. NOTE: is_dataclass() is also True
        # for dataclass *types*; asdict() only accepts instances, so reject
        # classes explicitly instead of letting asdict() raise a confusing
        # TypeError.
        if is_dataclass(config) and not isinstance(config, type):
            config_dict = asdict(config)
        elif isinstance(config, dict):
            config_dict = config
        else:
            raise ValueError(f"Expected dataclass or dict, got {type(config)}")

        # Keep only keys that are declared fields of the target class so
        # stray keys cannot break the constructor call below.
        valid_fields = config_class.__dataclass_fields__
        filtered_dict = {k: v for k, v in config_dict.items() if k in valid_fields}

        # Create instance from the validated subset of values.
        return config_class(**filtered_dict)
    except Exception as e:
        # Re-raise as a domain-specific error, preserving the original cause.
        raise ConfigurationError(
            f"Failed to validate configuration against {config_class.__name__}: {str(e)}",
            config=config,
        ) from e


def detect_attention_heads(backbone: Any) -> int:
    """
    Automatically detect the number of attention heads in a backbone model.

    Args:
        backbone: Backbone model to inspect

    Returns:
        int: Detected number of attention heads, or 12 if detection fails
    """
    # Attribute names commonly used by popular model implementations.
    candidate_names = ("num_attention_heads", "n_heads", "num_heads", "heads", "n_head")

    # Sentinel distinguishes "attribute absent" from "attribute set to None",
    # matching hasattr() semantics.
    _missing = object()

    # Inspect the model itself first, then its config object (if present).
    sources = [backbone]
    inner_config = getattr(backbone, "config", _missing)
    if inner_config is not _missing:
        sources.append(inner_config)

    for source in sources:
        for name in candidate_names:
            value = getattr(source, name, _missing)
            if value is not _missing:
                return value

    # Detection failed; fall back to a conventional default.
    logger.warning(
        "Could not automatically detect number of attention heads. "
        "Using default value of 12. This may lead to incorrect behavior."
    )
    return 12


def detect_hidden_size(backbone: Any) -> int:
    """
    Automatically detect the hidden size in a backbone model.

    Args:
        backbone: Backbone model to inspect

    Returns:
        int: Detected hidden size, or 768 if detection fails
    """
    # Attribute names commonly used by popular model implementations.
    candidate_names = (
        "hidden_size",
        "d_model",
        "emb_dim",
        "embedding_dim",
        "hidden_dim",
    )

    # Sentinel distinguishes "attribute absent" from "attribute set to None",
    # matching hasattr() semantics.
    _missing = object()

    # Inspect the model itself first, then its config object (if present).
    sources = [backbone]
    inner_config = getattr(backbone, "config", _missing)
    if inner_config is not _missing:
        sources.append(inner_config)

    for source in sources:
        for name in candidate_names:
            value = getattr(source, name, _missing)
            if value is not _missing:
                return value

    # Fall back to the embedding layer's output dimension, if exposed.
    embeddings = getattr(backbone, "embeddings", _missing)
    if embeddings is not _missing:
        dim = getattr(embeddings, "embedding_dim", _missing)
        if dim is not _missing:
            return dim

    # Detection failed; fall back to a conventional default.
    logger.warning(
        "Could not automatically detect hidden size. "
        "Using default value of 768. This may lead to incorrect behavior."
    )
    return 768


def merge_config_with_args(
    config: Optional[Union[Dict[str, Any], BasePEFTConfig]],
    args: Dict[str, Any],
    config_class: Type[T],
) -> T:
    """
    Merge configuration with explicit arguments, prioritizing arguments.

    Keys that are not declared fields of ``config_class`` are ignored (both
    from ``config`` and from ``args``), mirroring ``validate_config``.
    ``None`` values in ``args`` mean "not provided" and do not override the
    corresponding ``config`` value.

    Args:
        config: Base configuration to start with (dataclass instance, dict,
            or None)
        args: Arguments to override configuration values
        config_class: Target configuration class

    Returns:
        T: Merged configuration of the specified class
    """
    valid_fields = config_class.__dataclass_fields__

    # Start with an empty dictionary
    merged_dict: Dict[str, Any] = {}

    # Seed with base config values, if any. NOTE: is_dataclass() is also
    # True for dataclass *types*, and asdict() only accepts instances, so
    # guard against being handed a class.
    if config is not None:
        if is_dataclass(config) and not isinstance(config, type):
            base = asdict(config)
        elif isinstance(config, dict):
            base = config
        else:
            base = {}
        # Drop keys the target class does not declare so the constructor
        # call below cannot fail on unexpected keyword arguments.
        merged_dict.update({k: v for k, v in base.items() if k in valid_fields})

    # Override with explicitly provided arguments; None means "not provided".
    for name, value in args.items():
        if value is not None and name in valid_fields:
            merged_dict[name] = value

    # Create and return instance
    return config_class(**merged_dict)
