from wenet.salmonn.salmonn import Salmonn_Model
# from wenet.transformer.whisper_encoder import OpenAIWhisperEncoder
from wenet.transformer.hubert_encoder import S3prlFrontend

def init_salmonn(configs):
    """Build a ``Salmonn_Model`` from a flat configuration dictionary.

    Args:
        configs: dict holding every model hyperparameter, plus
            ``frontend_conf`` (kwargs for the speech-encoder frontend).

    Returns:
        A fully constructed ``Salmonn_Model`` instance.

    Raises:
        KeyError: if any required configuration key is absent.
    """
    # (config key, Salmonn_Model keyword) pairs, in the order the keys
    # are consumed.  Most names pass through unchanged; "use_lora" and
    # "vicuna_low_resource" map to the model's "lora"/"low_resource".
    key_map = (
        ("llm_path", "llm_path"),
        ("vicuna_low_resource", "low_resource"),
        ("speech_qformer_token_num", "speech_qformer_token_num"),
        ("speech_qformer_layer", "speech_qformer_layer"),
        ("use_lora", "lora"),
        ("lora_alpha", "lora_alpha"),
        ("lora_rank", "lora_rank"),
        ("lora_dropout", "lora_dropout"),
        ("second_per_frame", "second_per_frame"),
        ("second_stride", "second_stride"),
        ("llama_model_generate_max_length", "llama_model_generate_max_length"),
        ("llama_model_generate_min_length", "llama_model_generate_min_length"),
        ("llama_model_generate_num_beams", "llama_model_generate_num_beams"),
        ("llama_model_generate_do_sample", "llama_model_generate_do_sample"),
        ("llama_model_generate_top_p", "llama_model_generate_top_p"),
        ("llama_model_generate_repetition_penalty",
         "llama_model_generate_repetition_penalty"),
        ("llama_model_generate_length_penalty",
         "llama_model_generate_length_penalty"),
        ("llama_model_generate_temperature", "llama_model_generate_temperature"),
        ("load_epoch_ckpt", "load_epoch_ckpt"),
        ("load_step_ckpt", "load_step_ckpt"),
        ("load_eval_ckpt", "load_eval_ckpt"),
        ("ckpt_path", "ckpt_path"),
    )
    kwargs = {dest: configs[src] for src, dest in key_map}

    # Speech encoder: HuBERT-family S3PRL frontend.
    # (A Whisper encoder variant exists but is commented out at import time.)
    # for whisper encoder
    # kwargs["encoder"] = OpenAIWhisperEncoder(**configs['encoder_conf'])
    # for hubert encoder
    kwargs["encoder"] = S3prlFrontend(**configs['frontend_conf'])

    return Salmonn_Model(**kwargs)