from model_agent import do_TTS, init_model_my
# GPU index the model is loaded onto — assumes a multi-GPU host; TODO confirm device 7 exists.
gpu_id = 7
# Earlier checkpoint/config pairs, kept for reference while iterating on training runs:
# checkpoint_path = "/mnt/sfs/asr/ckpt/qwen2_multi_task_4_6gpus_gxl_adapter/epoch26_cosyvoice1_new-set_token_10w_s2s/step_84999.pt"
# config_path = "../conf/config_llm_huawei_base-version_cosyvoice1-token.yaml"
# checkpoint_path = "/mnt/sfs/asr/code/osum_xlgeng/examples/wenetspeech/whisper/exp/epoch_31_LLMinstruct_cosyvoice1_10Wtts_1WenTTS_2Khqtts_1KenS2S_3Ks2s_5Ws2t/step_59999.pt"
# Currently-active checkpoint and its matching model config (must be changed as a pair).
checkpoint_path = "/mnt/sfs/asr/code/osum_xlgeng/examples/wenetspeech/whisper/exp/epoch_32_LLMinstruct_cosyvoice1_10Wtts_1WenTTS_2Khqtts_1KenS2S_3Ks2s_5Ws2t/step_19999.pt"
config_path = "../conf/config_llm_huawei_instruct-version_cosyvoice1-token.yaml"
# Load model, tokenizer, and target device via the project helper.
# NOTE(review): presumably blocks until weights are loaded onto the chosen GPU — verify in model_agent.
model, tokenizer, device = init_model_my(gpu_id=gpu_id, checkpoint_path=checkpoint_path, config_path=config_path)
# Interactive loop: read text from stdin, synthesize it with do_TTS, and print
# the result. Type "quit" (or send EOF / Ctrl-C at the prompt) to exit.
while True:
    try:
        text = input("请输入要合成的文本: ")
    except (EOFError, KeyboardInterrupt):
        # Exit cleanly on Ctrl-D / Ctrl-C instead of crashing with a traceback.
        break
    # Check the quit command BEFORE asking for the save path, so the user is
    # not forced through a second prompt just to exit (original asked for the
    # path first and only then checked for "quit").
    if text == "quit":
        break
    wav_path = input("请输入合成语音保存路径,如果None则合成语音不保存，只返回token: ")
    # "None" (or simply pressing Enter) means: don't save audio, only return tokens.
    if wav_path in ("None", ""):
        wav_path = None
    response = do_TTS(device=device, model=model, text_for_tts=text, output_wav_path=wav_path)
    print("合成语音内容:", response)
