"""
Utility functions for model introspection and detection.

This module provides functions to detect model properties like attention heads,
hidden size, and layer count.
"""

from typing import Any
import logging

# Module-level logger for this module.
logger = logging.getLogger(__name__)


def detect_attention_heads(backbone: Any) -> int:
    """
    Auto-detect the number of attention heads of a backbone model.

    Detection strategies, tried in order:
    1. Inspect config objects (``backbone.model.config``, then
       ``backbone.config`` for consistency with :func:`detect_hidden_size`
       and :func:`detect_model_layers`).
    2. Scan named submodules for head-count attributes.
    3. Heuristic estimate based on ``hidden_size`` and common head dims.

    Args:
        backbone: The backbone network model.

    Returns:
        int: Detected number of attention heads; defaults to 12 when
        detection fails.
    """
    default_heads = 12
    # Attribute names used by different model families for the head count.
    head_attr_names = ["num_attention_heads", "n_heads", "num_heads"]

    try:
        # Strategy 1a: config nested under backbone.model (checked first to
        # preserve the historical lookup precedence).
        if hasattr(backbone, "model") and hasattr(backbone.model, "config"):
            config = backbone.model.config
            for attr_name in head_attr_names:
                if hasattr(config, attr_name):
                    heads = getattr(config, attr_name)
                    logger.info(
                        f"从backbone.model.config.{attr_name}检测到{heads}个注意力头"
                    )
                    return heads

            # HuggingFace configs expose to_dict(); some keys only appear there.
            if hasattr(config, "to_dict"):
                config_dict = config.to_dict()
                for key in head_attr_names:
                    if key in config_dict:
                        heads = config_dict[key]
                        logger.info(
                            f"从backbone.model.config字典中检测到{heads}个注意力头"
                        )
                        return heads

        # Strategy 1b: config attached directly to the backbone — matches the
        # lookup performed by detect_hidden_size / detect_model_layers.
        if hasattr(backbone, "config"):
            config = backbone.config
            for attr_name in head_attr_names:
                if hasattr(config, attr_name):
                    heads = getattr(config, attr_name)
                    logger.info(
                        f"从backbone.config.{attr_name}检测到{heads}个注意力头"
                    )
                    return heads

        # Strategy 2: scan the module tree for attention-related attributes.
        model = backbone.model if hasattr(backbone, "model") else backbone
        for name, module in model.named_modules():
            if hasattr(module, "num_heads"):
                heads = module.num_heads
                logger.info(f"从模块{name}.num_heads中检测到{heads}个注意力头")
                return heads
            if hasattr(module, "num_attention_heads"):
                heads = module.num_attention_heads
                logger.info(
                    f"从模块{name}.num_attention_heads中检测到{heads}个注意力头"
                )
                return heads
            # Derive the head count from hidden_size / head_dim on modules
            # whose name marks them as attention blocks.
            if (
                any(x in name for x in ["attention", "self_attn"])
                and hasattr(module, "head_dim")
                and hasattr(module, "hidden_size")
            ):
                heads = module.hidden_size // module.head_dim
                logger.info(f"从模块{name}的dimensions推导出{heads}个注意力头")
                return heads

        # Strategy 3: heuristic based on the hidden size.
        # Most Transformer models use a per-head dimension of 64 (or another
        # common size), i.e. heads = hidden_size / head_dim.
        if hasattr(backbone, "hidden_size"):
            hidden_size = backbone.hidden_size
            # Per-head dimensions seen in common architectures.
            common_head_sizes = [64, 80, 96, 128]

            # Prefer a head size that evenly divides the hidden size.
            for head_dim in common_head_sizes:
                if hidden_size % head_dim == 0:
                    heads = hidden_size // head_dim
                    logger.info(
                        f"基于hidden_size={hidden_size}和推测的head_dim={head_dim}估算出{heads}个注意力头"
                    )
                    return heads

            # Last resort: assume a common head count that divides hidden_size.
            potential_heads = [8, 12, 16, 24, 32]
            for heads in potential_heads:
                if hidden_size % heads == 0:
                    logger.info(
                        f"基于hidden_size={hidden_size}被{heads}整除，估算出{heads}个注意力头"
                    )
                    return heads

    except Exception as e:
        logger.warning(f"检测注意力头数量时发生错误: {str(e)}")

    logger.warning(
        f"无法自动检测注意力头数量，使用默认值{default_heads}。"
        "如需覆盖，请明确指定num_attention_heads参数。"
    )
    return default_heads


def detect_hidden_size(backbone: Any) -> int:
    """
    Auto-detect the hidden size of a backbone model.

    Args:
        backbone: The backbone network model.

    Returns:
        int: Detected hidden size; defaults to 768 when it cannot be
        determined.
    """
    default_size = 768

    try:
        # Probe (holder, attribute) locations where the hidden size is
        # commonly stored, in priority order.
        candidates = (
            (backbone, "hidden_size"),
            (getattr(backbone, "model", None), "hidden_size"),
            (getattr(backbone, "config", None), "hidden_size"),
            # Some transformer architectures name it d_model instead.
            (getattr(backbone, "config", None), "d_model"),
        )
        for holder, attr in candidates:
            if holder is not None and hasattr(holder, attr):
                return getattr(holder, attr)

        # A backbone may expose its feature dimension via a method.
        if hasattr(backbone, "get_feature_dims"):
            return backbone.get_feature_dims()

        # Fall back to the word-embedding dimension when available.
        embeddings = getattr(backbone, "embeddings", None)
        if embeddings is not None and hasattr(embeddings, "word_embeddings"):
            return embeddings.word_embeddings.embedding_dim

        # Last resort: walk the module tree looking for an embedding layer.
        model = getattr(backbone, "model", backbone)
        for name, module in model.named_modules():
            looks_like_embedding = "embed" in name or "embedding" in name
            if looks_like_embedding and hasattr(module, "embedding_dim"):
                return module.embedding_dim

    except Exception as e:
        logger.warning(f"检测隐藏层大小时发生错误: {str(e)}")

    logger.warning(
        f"无法自动检测隐藏层大小，使用默认值{default_size}。"
        "如需覆盖，请明确指定token_dim参数。"
    )
    return default_size


def detect_model_layers(backbone: Any) -> int:
    """
    Auto-detect the number of layers of a backbone model.

    Tries config attributes first (``backbone.config``, then
    ``backbone.model.config`` for consistency with
    :func:`detect_attention_heads`), then well-known structural layouts.

    Args:
        backbone: The backbone network model.

    Returns:
        int: Detected number of layers; defaults to 12 when detection fails.
    """
    default_layers = 12
    # Attribute names used by different model families for the layer count.
    layer_attr_names = ["num_hidden_layers", "n_layer", "num_layers"]

    try:
        # Config attached directly to the backbone.
        if hasattr(backbone, "config"):
            for attr in layer_attr_names:
                if hasattr(backbone.config, attr):
                    return getattr(backbone.config, attr)

        # Config nested under backbone.model — the same location inspected by
        # detect_attention_heads; previously missed here.
        if hasattr(backbone, "model") and hasattr(backbone.model, "config"):
            config = backbone.model.config
            for attr in layer_attr_names:
                if hasattr(config, attr):
                    return getattr(config, attr)

        # Inspect the model structure directly.
        model = backbone.model if hasattr(backbone, "model") else backbone

        # BERT-style encoders expose encoder.layer.
        if hasattr(model, "encoder") and hasattr(model.encoder, "layer"):
            return len(model.encoder.layer)

        # Models exposing a flat layers list.
        if hasattr(model, "layers"):
            return len(model.layers)

        # ViT-style models expose blocks.
        if hasattr(model, "blocks"):
            return len(model.blocks)

        # GPT-style models expose transformer.h.
        if hasattr(model, "transformer") and hasattr(model.transformer, "h"):
            return len(model.transformer.h)

    except Exception as e:
        logger.warning(f"检测模型层数时发生错误: {str(e)}")

    logger.warning(
        f"无法自动检测模型层数，使用默认值{default_layers}。"
        "如需覆盖，请明确指定num_layers参数。"
    )
    return default_layers
