from transformers import Qwen2AudioConfig, Qwen3Config
import logging
logger = logging.getLogger(__name__)

class IdealLLMConfig(Qwen2AudioConfig):
    """Configuration for the IdealLLM audio-language model.

    Extends ``Qwen2AudioConfig`` with audio-encoder / projector settings and
    a Qwen3 text backbone configuration stored in ``self.text_config``.
    """

    model_type = "IdealLLM"

    def __init__(
        self,
        audio_encoder_type: str = "cnn",
        projector_hidden_size: int = 768,
        audio_hidden_size: int = 1280,
        num_hidden_layers: int = 36,
        input_dim: int = 896,
        projector_dim: int = 1280,
        text_config=None,
        text_model_name_or_path: str = "/apdcephfs/share_976139/users/hongfeixue/model/Qwen3-8B-base",
        **kwargs,
    ):
        """Build the config.

        Args:
            audio_encoder_type: Kind of audio encoder (e.g. ``"cnn"``).
            projector_hidden_size: Hidden size of the audio->text projector.
            audio_hidden_size: Hidden size of the audio encoder output.
            num_hidden_layers: Number of hidden layers for the audio tower.
            input_dim: Input feature dimension of the projector.
            projector_dim: Output dimension of the projector.
            text_config: Optional text backbone config — a ``Qwen3Config``
                instance or a plain dict (as produced when this config is
                serialized). When given, no filesystem access happens.
            text_model_name_or_path: Fallback location of the Qwen3 base
                model to load ``text_config`` from when ``text_config`` is
                not supplied. Defaults to the original hard-coded path so
                existing callers keep working.
            **kwargs: Forwarded to ``Qwen2AudioConfig.__init__``.
        """
        super().__init__(**kwargs)
        self.audio_encoder_type = audio_encoder_type
        self.projector_hidden_size = projector_hidden_size
        self.audio_hidden_size = audio_hidden_size
        # Bug fix: this parameter was previously accepted but never stored.
        self.num_hidden_layers = num_hidden_layers
        self.input_dim = input_dim
        self.projector_dim = projector_dim
        self.text_model_name_or_path = text_model_name_or_path

        # Prefer an explicitly supplied text_config (this is what arrives
        # when a saved IdealLLM config is re-loaded) over re-reading the
        # base model from disk — the hard-coded path only exists on the
        # original training host.
        if text_config is None:
            self.text_config = Qwen3Config.from_pretrained(text_model_name_or_path)
        elif isinstance(text_config, dict):
            self.text_config = Qwen3Config(**text_config)
        else:
            self.text_config = text_config

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load a config, staying compatible with original Qwen2-Audio checkpoints.

        Logs a warning when the stored ``model_type`` differs from
        ``cls.model_type`` so the implicit conversion is visible, then
        defers to the parent implementation.
        """
        # Peek at the raw config dict to detect a non-IdealLLM checkpoint.
        config_dict, _ = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        stored_type = config_dict.get("model_type")
        if stored_type is not None and stored_type != cls.model_type:
            logger.warning(
                "Converting %s config to %s format", stored_type, cls.model_type
            )

        return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
