from gxl_ai_utils.utils import utils_file
from model_agent import do_TTS, init_model_my
from cosyvoice_util import  token_list2wav
def convert_text():
    """Convert a pipe-delimited text file ("key|text" per line) into an scp file.

    Reads input_data/tts_test.txt, builds {key: text} and writes it to
    input_data/tts_test.scp via utils_file.write_dict_to_scp.
    Lines without a '|' separator are skipped with a warning instead of crashing.
    """
    input_text_file = "input_data/tts_test.txt"
    lines = utils_file.load_list_file_clean(input_text_file)
    text_dict = {}
    for line in lines:
        # maxsplit=1 keeps any '|' characters inside the text itself.
        key, sep, text = line.partition('|')
        if not sep:
            # Malformed line (no separator) — previously this raised IndexError.
            print(f"convert_text: skipping malformed line: {line!r}")
            continue
        text_dict[key] = text
    output_text_file = "input_data/tts_test.scp"
    utils_file.write_dict_to_scp(text_dict, output_text_file)
    print(text_dict)



if __name__ == '__main__':
    # Command line: input text scp, output token scp, output wav-manifest jsonl, GPU id.
    input_text_scp, output_token_file, output_wav_file, gpu_id = utils_file.do_get_commandline_param(
        4, ['input_text_scp', 'output_token_file', "output_wav_file", 'gpu_id'])
    gpu_id = int(gpu_id)
    model, tokenizer, device = init_model_my(gpu_id)

    text_dict = utils_file.load_dict_from_scp(input_text_scp)
    output_res_dict = {}   # key -> TTS token result (written to output_token_file)
    wav_dict_list = []     # jsonl records: {"key", "wav", "txt"} (written to output_wav_file)

    for index, (key, value) in enumerate(text_dict.items(), start=1):
        res = do_TTS(model, device, value)
        print(f'{key} : {res}')
        output_res_dict[key] = res
        wav_path = token_list2wav(res, file_name=key)
        wav_dict_list.append({"key": key, "wav": wav_path, "txt": value})
        # Checkpoint partial results every 2 items so progress survives a crash.
        if index % 2 == 0:
            utils_file.write_dict_to_scp(output_res_dict, output_token_file)
            utils_file.write_dict_list_to_jsonl(wav_dict_list, output_wav_file)

    # Final flush: with an odd number of items the in-loop checkpoint misses
    # the last entry, so always write the complete results at the end.
    utils_file.write_dict_to_scp(output_res_dict, output_token_file)
    utils_file.write_dict_list_to_jsonl(wav_dict_list, output_wav_file)




