# Copyright (c) 2022 Binbin Zhang (binbzha@qq.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
from wenet.transducer.joint import TransducerJoint
from wenet.transducer.predictor import (ConvPredictor, EmbeddingPredictor,
                                        RNNPredictor)
from wenet.transducer.transducer import Transducer
from wenet.transformer.asr_model import ASRModel
from wenet.transformer.cmvn import GlobalCMVN
from wenet.transformer.ctc import CTC
from wenet.transformer.decoder import BiTransformerDecoder, TransformerDecoder
from wenet.transformer.encoder import ConformerEncoder, TransformerEncoder
from wenet.squeezeformer.encoder import SqueezeformerEncoder
from wenet.efficient_conformer.encoder import EfficientConformerEncoder
from wenet.utils.cmvn import load_cmvn

from wenet.baichuan.configuration_baichuan import BaiChuanConfig, BaiChuanConfig_small
from wenet.baichuan.modeling_baichuan import BaiChuanForCausalLM

#from wenet.cllama2.modeling_cllama2 import LlamaModel, LlamaForCausalLM
#from wenet.cllama2.configuration_cllama2 import LlamaConfig

from wenet.qwen.modeling_qwen import QWenLMHeadModel
from wenet.qwen.modeling_qwen2 import Qwen2ForCausalLM
from wenet.qwen.configuration_qwen import QWenConfig
from wenet.qwen.configuration_qwen2 import Qwen2Config
from cosyvoice.utils.common import ras_sampling

from transformers import AutoModelForCausalLM

def init_model(configs):
    """Build the text-to-speech-token LLM model described by ``configs``.

    Args:
        configs: Configuration dict. Must contain:
            - 'model_conf': sizes and loss options
              ('text_encoder_input_size', 'llm_input_size',
              'llm_output_size', 'text_token_size', 'speech_token_size',
              'length_normalized_loss', 'lsm_weight').
            - 'text_encoder_conf': kwargs for ``ConformerEncoder``.
            - 'llm_conf': kwargs for ``TransformerEncoder``.

    Returns:
        An ``ASRModel`` wiring a Conformer text encoder into a
        Transformer "LLM" backbone.
    """
    # Pull the shared sub-dict once instead of re-indexing configs for
    # every field.  (The previously-read 'input_dim', 'output_dim' and
    # 'llm' keys were never used and have been dropped.)
    model_conf = configs['model_conf']

    # Conformer encoder that embeds the input text tokens.
    text_encoder = ConformerEncoder(**configs['text_encoder_conf'])
    # Transformer encoder used as the LLM backbone.
    llm = TransformerEncoder(**configs['llm_conf'])

    # NOTE(review): ras_sampling is imported at file top but sampling is
    # passed as None here — presumably ASRModel falls back to a default
    # decoding strategy; confirm before enabling sampling.
    model = ASRModel(text_encoder_input_size=model_conf['text_encoder_input_size'],
                     llm_input_size=model_conf['llm_input_size'],
                     llm_output_size=model_conf['llm_output_size'],
                     text_token_size=model_conf['text_token_size'],
                     speech_token_size=model_conf['speech_token_size'],
                     text_encoder=text_encoder,
                     llm=llm,
                     sampling=None,
                     length_normalized_loss=model_conf['length_normalized_loss'],
                     lsm_weight=model_conf['lsm_weight'])
    return model

