import json
import pdb
import sys

import torch

# 加载修改后的 config
from transformers import (
    CONFIG_MAPPING,
    AutoProcessor,
    AutoTokenizer,
    Qwen2AudioConfig,
    Qwen2AudioProcessor,
    Qwen2Config,
    Qwen2ForCausalLM,
    Qwen3Config,
    WhisperConfig,
    WhisperFeatureExtractor,
    WhisperForConditionalGeneration,
)

from modeling_IdealLLM_v2 import IdealLLMForConditionalGeneration, IdealLLMModel


def build_merged_config(whisper_path: str, qwen_lm_path: str, output_path: str):
    """Merge a Whisper encoder config and a Qwen3 LM config into one
    Qwen2Audio-style config, save it to *output_path*, and return it.

    Args:
        whisper_path: directory of a pretrained Whisper model (encoder side).
        qwen_lm_path: directory of a pretrained Qwen3 causal-LM model.
        output_path: directory where the merged config is written.

    Returns:
        The merged ``Qwen2AudioConfig`` object for downstream use.
    """
    # Load the two source configs.
    whisper_cfg = WhisperConfig.from_pretrained(whisper_path)
    lm_cfg = Qwen3Config.from_pretrained(qwen_lm_path)

    # Attribute names copied verbatim from the Whisper encoder config
    # into the audio sub-config.
    audio_attrs = (
        "num_mel_bins",
        "encoder_layers",
        "encoder_attention_heads",
        "encoder_ffn_dim",
        "d_model",
        "activation_function",
        "max_source_positions",
        "scale_embedding",
    )

    # Attribute names copied verbatim from the Qwen3 LM config into the
    # text sub-config.  ("use_mrope" was deliberately left out.)
    text_attrs = (
        "bos_token_id",
        "eos_token_id",
        "hidden_size",
        "intermediate_size",
        "num_hidden_layers",
        "num_attention_heads",
        "num_key_value_heads",
        "max_position_embeddings",
        "rope_theta",
        "rms_norm_eps",
        "sliding_window",
        "tie_word_embeddings",
    )

    # Build the audio sub-config.
    audio_config = {"model_type": "qwen2_audio_encoder"}
    for attr in audio_attrs:
        audio_config[attr] = getattr(whisper_cfg, attr)

    # Build the text sub-config (vocab_size appended last, as before).
    text_config = {"model_type": "qwen3"}
    for attr in text_attrs:
        text_config[attr] = getattr(lm_cfg, attr)
    text_config["vocab_size"] = lm_cfg.vocab_size

    # Assemble the full top-level config dict.
    merged_config_dict = {
        "architectures": ["Qwen2AudioForConditionalGeneration"],
        "model_type": "qwen2_audio",
        "audio_config": audio_config,
        "text_config": text_config,
        "audio_token_index": 151646,  # keep the Qwen-Audio special token id
        "ignore_index": -100,
        "vocab_size": lm_cfg.vocab_size,
        "transformers_version": "4.51.0",
        "is_encoder_decoder": False,
        "decoder_start_token_id": lm_cfg.bos_token_id,
    }

    # Validate via the config class and persist to disk.
    merged_config = Qwen2AudioConfig.from_dict(merged_config_dict)
    merged_config.save_pretrained(output_path)

    return merged_config


def _build_adapter_key_mapping(num_layers: int = 2) -> dict:
    """Build the wenet-adapter -> torch TransformerEncoderLayer name map.

    NOTE(review): q/k/v linears all map onto the same fused ``in_proj``
    tensor, exactly as in the original hand-written table — a plain key
    rename using this map would overwrite entries; presumably the consumer
    handles the fusion itself.
    NOTE(review): this mapping is not referenced elsewhere in this script
    as shown — possibly kept for external use.
    """
    per_layer = {
        "feed_forward.w_1.weight": "linear1.weight",
        "feed_forward.w_1.bias": "linear1.bias",
        "feed_forward.w_2.weight": "linear2.weight",
        "feed_forward.w_2.bias": "linear2.bias",
        "self_attn.linear_q.weight": "self_attn.in_proj_weight",
        "self_attn.linear_q.bias": "self_attn.in_proj_bias",
        "self_attn.linear_k.weight": "self_attn.in_proj_weight",
        "self_attn.linear_k.bias": "self_attn.in_proj_bias",
        "self_attn.linear_v.weight": "self_attn.in_proj_weight",
        "self_attn.linear_v.bias": "self_attn.in_proj_bias",
        "self_attn.linear_out.weight": "self_attn.out_proj.weight",
        "self_attn.linear_out.bias": "self_attn.out_proj.bias",
    }
    return {
        f"layers.{i}.{src}": f"layers.{i}.{dst}"
        for i in range(num_layers)
        for src, dst in per_layer.items()
    }


adapter_key_mapping = _build_adapter_key_mapping()

# Output directory for the converted model (could also come from argv).
# new_model_path = sys.argv[1]
new_model_path = "/apdcephfs/share_976139/users/hongfeixue/model/IdealLLM-qwen3-nolora"

# FIX: WENET_MODEL_PATH was assigned twice back-to-back, so the first value
# was dead code. The alternate checkpoint is kept commented out so it can be
# switched back in deliberately.
# WENET_MODEL_PATH = "/apdcephfs/private_hongfeixue/checkpoint/mlcslm/IdealLLM-ctc-qwen3-base/step2ori/avg_2_mergelora.pt"
WENET_MODEL_PATH = "/apdcephfs/private_hongfeixue/checkpoint/mlcslm/IdealLLM-ctc-qwen3-base/step1/final.pt"
WHISPER_MODEL_PATH = "/apdcephfs/share_976139/users/hongfeixue/model/whisper-large-v3"
QWEN_MODEL_PATH = "/apdcephfs/share_976139/users/hongfeixue/model/Qwen3-8B-base"

# NOTE(review): ORIG_QWEN_AUDIO_PATH is never used below — kept for reference.
ORIG_QWEN_AUDIO_PATH = "/apdcephfs/share_976139/users/hongfeixue/model/Qwen2-Audio-7B-Instruct"

# Generate the merged Qwen2Audio-style config and save it to the output dir.
config = build_merged_config(WHISPER_MODEL_PATH, QWEN_MODEL_PATH, new_model_path)

# whisper_model = WhisperForConditionalGeneration.from_pretrained(WHISPER_MODEL_PATH)

# qwen_model = Qwen2ForCausalLM.from_pretrained(QWEN_MODEL_PATH)

new_model = IdealLLMModel(config)

# load to current model
wenet_checkpoint = torch.load(WENET_MODEL_PATH, map_location='cpu', mmap=True)

audio_tower_dict = {}
speech_model2_dict = {}
language_model_dict = {}
speech_transformer_adapter1_dict = {}
speech_transformer_adapter2_dict = {}
trainable_prompts_dict = {}
downsample_projector1_dict = {}
downsample_projector2_dict = {}
language_class_dict = {}
weight_network_dict = {}
ctc_dict = {}
# Route every checkpoint tensor to its target bucket, rewriting the wenet
# key prefix to the corresponding submodule-local name.  Branch ORDER
# matters: these are substring (not prefix) matches, so earlier branches
# shadow later ones for keys containing multiple markers.
for key, value in wenet_checkpoint.items():
    if 'encoder.encoders' in key:
        # Whisper-style audio tower: drop the prefix, rename blocks->layers.
        key = key.replace('encoder.encoders.', '')
        key = key.replace('blocks', 'layers')
        audio_tower_dict.update({key: value})
    elif 'encoder2.' in key:
        # Second speech encoder.
        # NOTE(review): the first replace looks redundant — stripping
        # 'encoder2.' alone would yield the same result for
        # 'encoder2.upstream...' keys; confirm before simplifying.
        key = key.replace('encoder2.upstream', 'upstream')
        key = key.replace('encoder2.', '')
        speech_model2_dict.update({key: value})
    elif 'llm.' in key:
        # Language model backbone.
        key = key.replace('llm.', '')
        language_model_dict.update({key: value})
    elif 'speech_transformer_adapter1.' in key:
        key = key.replace('speech_transformer_adapter1.', '')
        speech_transformer_adapter1_dict.update({key: value})
    elif 'speech_transformer_adapter2.' in key:
        key = key.replace('speech_transformer_adapter2.', '')
        speech_transformer_adapter2_dict.update({key: value})
    elif 'trainable_prompts' in key:
        # Loaded immediately (one load_state_dict call PER matching key) —
        # unlike the other branches, which accumulate and load once later.
        new_model.trainable_prompts.load_state_dict({key.replace('trainable_prompts.', ''): value})
    elif 'downsample_projector1' in key:
        key = key.replace('downsample_projector1.', '')
        downsample_projector1_dict.update({key: value})
    elif 'downsample_projector2' in key:
        key = key.replace('downsample_projector2.', '')
        downsample_projector2_dict.update({key: value})
    elif "weight_network" in key:
        weight_network_dict.update({key.replace('weight_network.', ''): value})
    elif "language_class" in key:
        language_class_dict.update({key.replace('language_class.', ''): value})
    elif "ctc" in key:
        # NOTE(review): loose match — ANY key containing "ctc" lands here,
        # and only the 'ctc.ctc_lo.' prefix is stripped; other ctc.* keys
        # would keep their prefix and likely fail the strict load below.
        ctc_dict.update({key.replace('ctc.ctc_lo.', ''): value})
    else:
        # Unmatched keys are only reported, not loaded.
        print(key)


# Speech towers are intentionally not loaded from this checkpoint:
# new_model.speech_model1.load_state_dict(audio_tower_dict)
# new_model.speech_model2.load_state_dict(speech_model2_dict)

# Load each remapped state dict into its matching submodule (strict load,
# same order as before).
_submodule_states = (
    (new_model.language_model, language_model_dict),
    (new_model.speech_transformer_adapter1, speech_transformer_adapter1_dict),
    (new_model.speech_transformer_adapter2, speech_transformer_adapter2_dict),
    (new_model.downsample_projector1, downsample_projector1_dict),
    (new_model.downsample_projector2, downsample_projector2_dict),
    (new_model.weight_network, weight_network_dict),
    (new_model.language_class, language_class_dict),
    (new_model.ctc_linear, ctc_dict),
)
for _module, _state in _submodule_states:
    _module.load_state_dict(_state)



# load processors
# Build a combined audio+text processor: the Whisper feature extractor
# handles audio, the Qwen tokenizer handles text.
whisper_extractor = WhisperFeatureExtractor.from_pretrained(WHISPER_MODEL_PATH)
qwen_tokenizer = AutoTokenizer.from_pretrained(QWEN_MODEL_PATH)
processor = Qwen2AudioProcessor( # combines feature extractor + tokenizer
    feature_extractor=whisper_extractor,
    tokenizer=qwen_tokenizer,
)
# pdb.set_trace() 
# Persist the converted model weights and the processor next to the config
# already written by build_merged_config.
new_model.save_pretrained(new_model_path)
processor.save_pretrained(new_model_path)

# CHECK
# new_processor = AutoProcessor.from_pretrained(new_model_path)
# model = IdealLLMForConditionalGeneration.from_pretrained(new_model_path)

print(f"NEW MERGED MODEL Output TO {new_model_path}")
# pdb.set_trace()