import logging

import torch

from wenet.llm_asr.llmasr_model import LLMASR_Model
from wenet.one_embedding.speech_encoder import init_speech_encoder
from wenet.transformer.cmvn import GlobalCMVN
from wenet.utils.checkpoint import load_checkpoint, load_trained_modules
from wenet.utils.cmvn import load_cmvn

from gxl_ai_utils.utils import utils_file

def init_llmasr(args, configs):
    """Build an ``LLMASR_Model``, load its initial weights, and selectively
    freeze parameters according to ``configs``.

    Args:
        args: parsed CLI arguments. ``args.checkpoint`` (full-model weights)
            or ``args.enc_init`` (partial/encoder weights), when present and
            non-None, locate the initial checkpoint.
        configs: model configuration dict. Must contain ``llm_path``,
            ``use_lora``, ``lora_alpha``, ``lora_rank``, ``lora_dropout``,
            ``osum_path``, ``query_num`` and ``fire_module``; may contain
            ``is_inference`` (default False) and ``init_step``.

    Returns:
        tuple: ``(model, configs)`` — the constructed model and the configs
        dict with ``init_infos`` (checkpoint metadata, or ``{}``) added.

    Raises:
        ValueError: if ``configs['fire_module']`` is absent, since then no
            parameter group would be selected for training.
    """
    llm_path = configs["llm_path"]
    lora = configs["use_lora"]
    lora_alpha = configs["lora_alpha"]
    lora_rank = configs["lora_rank"]
    lora_dropout = configs["lora_dropout"]
    is_inference = configs.get("is_inference", False)
    utils_file.logging_limit_print(f"init_llmasr()：开始初始化模型，参数：{args}, {configs}")
    utils_file.logging_limit_print(f"init_llmasr()：is_inference={is_inference}")

    # The speech encoder is initialised from the OSUM checkpoint path given
    # in the config (see init_speech_encoder for the supported encoder types).
    speech_encoder = init_speech_encoder(configs, configs['osum_path'])

    model = LLMASR_Model(
        encoder=speech_encoder,
        llm_path=llm_path,
        query_num=configs['query_num'],
        lora=lora,
        lora_alpha=lora_alpha,
        lora_rank=lora_rank,
        lora_dropout=lora_dropout,
        is_inference=is_inference,
    )

    utils_file.print_model_size(model.encoder)
    utils_file.print_model_size(model.llama_model)

    # Load initial weights: a full checkpoint takes precedence over
    # module-wise initialisation via enc_init.
    logging.info('init_llmasr()：开始加载初始化模型')
    if getattr(args, 'checkpoint', None) is not None:
        logging.info(f' 设置了初始化模型位置，开始加载，参数文件位置：{args.checkpoint}')
        infos = load_checkpoint(model, args.checkpoint)
    elif getattr(args, 'enc_init', None) is not None:
        # BUGFIX: the original guarded this branch with
        # hasattr(args, 'checkpoint') and then read args.enc_init, which
        # raises AttributeError when args has no 'enc_init' attribute.
        infos = load_trained_modules(model, args)
    else:
        infos = {}

    # Optionally discard the restored step/epoch info so training restarts
    # its counters from zero.
    if configs.get('init_step', False):
        infos = {}
    configs["init_infos"] = infos
    print(configs)
    logging.info('加载初始化模型完毕')

    # NOTE(review): the original code logged the same message on both
    # branches of `is_inference`; collapsed to a single call (LLM params are
    # left untouched either way).
    logging.info('不更换LLM的参数')

    # Selectively freeze parameters: only the module group named by
    # 'fire_module' stays trainable.
    logging.info('开始选择性冻结模块')
    fire_module = configs.get("fire_module", None)
    if fire_module is None:
        logging.info('没有选择解冻的模块,也就是没有训练参数，直接报错返回')
        raise ValueError('没有选择解冻的模块,也就是没有训练参数，直接报错返回')
    for k, p in model.named_parameters():
        if fire_module == 'llm':
            # Train only the LLM (everything outside llama_model is frozen).
            if not k.startswith("llama_model"):
                p.requires_grad = False
        elif fire_module == 'link_and_encoder':
            # Train the encoder + projection; freeze the LLM. Layers related
            # to speech tokens are therefore left trainable.
            if k.startswith("llama_model"):
                p.requires_grad = False
        elif fire_module == "link_and_encoder_and_lora":
            # Everything (encoder, projection, LoRA adapters) stays
            # trainable, so no per-parameter work is needed.
            break
        logging.info(f"{k} {p.requires_grad}")
    logging.info('冻结完毕')

    return model, configs
