from transformers import Qwen2AudioForConditionalGeneration, AutoProcessor, WhisperForConditionalGeneration, Qwen2ForCausalLM, WhisperForConditionalGeneration, Qwen2AudioProcessor, WhisperFeatureExtractor, AutoTokenizer
import pdb

# 加载修改后的 config
from transformers import (
    WhisperConfig,
    Qwen2Config,
    Qwen2AudioConfig,
    CONFIG_MAPPING,
)
import json
import sys

def build_merged_config(whisper_path: str, qwen_lm_path: str, output_path: str):
    """Merge a Whisper encoder config and a Qwen2 LM config into a Qwen2Audio config.

    Args:
        whisper_path: Path (or hub id) of the Whisper model whose encoder
            hyper-parameters become the ``audio_config``.
        qwen_lm_path: Path (or hub id) of the Qwen2 causal LM whose
            hyper-parameters become the ``text_config``.
        output_path: Directory where the merged config is written via
            ``save_pretrained``.

    Returns:
        The merged ``Qwen2AudioConfig`` object, also saved to ``output_path``.
    """
    # Load the two donor configurations.
    whisper_config = WhisperConfig.from_pretrained(whisper_path)
    qwen_lm_config = Qwen2Config.from_pretrained(qwen_lm_path)

    # Whisper encoder attributes copied verbatim into the audio sub-config.
    audio_attrs = (
        "num_mel_bins",
        "encoder_layers",
        "encoder_attention_heads",
        "encoder_ffn_dim",
        "d_model",
        "activation_function",
        "max_source_positions",
        "scale_embedding",
    )

    # Qwen2 LM attributes copied verbatim into the text sub-config.
    text_attrs = (
        "bos_token_id",
        "eos_token_id",
        "hidden_size",
        "intermediate_size",
        "num_hidden_layers",
        "num_attention_heads",
        "num_key_value_heads",
        "max_position_embeddings",
        "rope_theta",
        "rms_norm_eps",
        "sliding_window",
        "tie_word_embeddings",
        "use_mrope",
    )

    def _copy_attrs(cfg, names, model_type):
        # Copy only the attributes the source config actually defines —
        # e.g. `use_mrope` is absent on some transformers versions of
        # Qwen2Config; missing keys then fall back to Qwen2AudioConfig
        # defaults instead of raising AttributeError here.
        sub = {"model_type": model_type}
        for name in names:
            if hasattr(cfg, name):
                sub[name] = getattr(cfg, name)
        return sub

    audio_config = _copy_attrs(whisper_config, audio_attrs, "qwen2_audio_encoder")
    text_config = _copy_attrs(qwen_lm_config, text_attrs, "qwen2")

    # Assemble the full Qwen2Audio configuration dict.
    merged_config_dict = {
        "architectures": ["Qwen2AudioForConditionalGeneration"],
        "model_type": "qwen2_audio",
        "audio_config": audio_config,
        "text_config": text_config,
        "audio_token_index": 151646,  # keep the Qwen-Audio special <audio> token id
        "ignore_index": -100,
        "vocab_size": qwen_lm_config.vocab_size,
        "transformers_version": "4.40.1",
        "is_encoder_decoder": False,
        "decoder_start_token_id": qwen_lm_config.bos_token_id,
    }

    # Convert to a Qwen2AudioConfig object (validates the dict) and persist it.
    merged_config = Qwen2AudioConfig.from_dict(merged_config_dict)
    merged_config.save_pretrained(output_path)

    # Return the config object for immediate reuse by the caller.
    return merged_config



# --- Script entry: assemble a new Qwen2Audio checkpoint from donor models ---

# Fail with a clear usage message instead of an opaque IndexError.
if len(sys.argv) < 2:
    sys.exit(f"usage: {sys.argv[0]} <output_model_dir>")
new_model_path = sys.argv[1]

WHISPER_MODEL_PATH = "/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/pretrained_models/openai/whisper-tiny"
QWEN_MODEL_PATH = "/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/pretrained_models/Qwen/Qwen2.5-0.5B"

# Original Qwen2-Audio checkpoint: only its tokenizer (which carries the
# audio special tokens, e.g. index 151646) is reused below.
ORIG_QWEN_AUDIO_PATH = "/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/pretrained_models/Qwen/Qwen2-Audio-7B"

# Generate and save the merged config for the new model.
config = build_merged_config(WHISPER_MODEL_PATH, QWEN_MODEL_PATH, new_model_path)

# Load the donor models whose weights will be transplanted.
whisper_model = WhisperForConditionalGeneration.from_pretrained(WHISPER_MODEL_PATH)
qwen_model = Qwen2ForCausalLM.from_pretrained(QWEN_MODEL_PATH)

# Instantiate a fresh (randomly initialised) Qwen2Audio model from the merged config.
new_model = Qwen2AudioForConditionalGeneration(config)

# Transplant weights: audio tower <- Whisper encoder, language model <- Qwen2 LM.
new_model.audio_tower.load_state_dict(whisper_model.model.encoder.state_dict())
new_model.language_model.load_state_dict(qwen_model.state_dict())

# Build the combined processor: Whisper's feature extractor + the
# Qwen2-Audio tokenizer (carries the audio special tokens).
whisper_extractor = WhisperFeatureExtractor.from_pretrained(WHISPER_MODEL_PATH)
qwen_tokenizer = AutoTokenizer.from_pretrained(ORIG_QWEN_AUDIO_PATH)
processor = Qwen2AudioProcessor(
    feature_extractor=whisper_extractor,
    tokenizer=qwen_tokenizer,
)

# Persist the assembled model and processor to the output directory.
new_model.save_pretrained(new_model_path)
processor.save_pretrained(new_model_path)

# Sanity check: the saved artifacts round-trip through from_pretrained.
new_processor = AutoProcessor.from_pretrained(new_model_path)
model = Qwen2AudioForConditionalGeneration.from_pretrained(new_model_path)

print(f"NEW MERGED MODEL Output TO {new_model_path}")
