from gxl_ai_utils.utils import utils_file
from model_agent import do_TTS, init_model_my

def convert_text():
    """Convert a pipe-delimited text file into a Kaldi-style .scp file.

    Reads ``input_data/tts_test.txt``, where each line is expected to be
    ``<utt_id>|<text>``, and writes ``input_data/tts_test.scp`` mapping
    utt_id -> text via utils_file.write_dict_to_scp.
    """
    input_text_file = "input_data/tts_test.txt"
    lines = utils_file.load_list_file_clean(input_text_file)
    text_dict = {}
    for line in lines:
        # partition() splits on the FIRST '|' only, so the text field may
        # itself contain '|' characters (split('|')[1] used to truncate
        # everything after a second delimiter).
        key, sep, text = line.partition('|')
        if not sep:
            # Line has no delimiter at all; previously this raised
            # IndexError and aborted the whole conversion. Skip it instead.
            continue
        text_dict[key] = text
    output_text_file = "input_data/tts_test.scp"
    utils_file.write_dict_to_scp(text_dict, output_text_file)
    print(text_dict)



if __name__ == '__main__':
    # Interactive TTS demo: load the LLM-based TTS model once, run a
    # warm-up synthesis, then synthesize user-entered text in a REPL loop.
    # CLI-parameter variant kept for reference:
    # input_text_scp, output_token_file, output_wav_file, gpu_id = utils_file.do_get_commandline_param(4, ['input_text_scp', 'output_token_file', "output_wav_file", 'gpu_id'] )
    gpu_id = 7  # hard-coded GPU index; adjust for the target machine
    checkpoint = "/mnt/sfs/asr/ckpt/qwen2_multi_task_4_6gpus_gxl_adapter/full_train_llm_3B_epoch_0/step_22499.pt"
    config = "../conf/config_llm_huawei_instruct_3B_cosyvoice1-token.yaml"
    model, tokenizer, device = init_model_my(gpu_id, checkpoint, config)
    # Warm-up / smoke-test synthesis before entering the interactive loop.
    print(do_TTS(model, device, "你知道吗"))
    while True:
        try:
            value = input("请输入文本:")
        except (EOFError, KeyboardInterrupt):
            # Exit cleanly on Ctrl-D / Ctrl-C (or exhausted piped stdin)
            # instead of dying with a traceback.
            break
        res = do_TTS(model, device, value)
        print(res)





