import ChatTTS
import torch
import torchaudio
from gxl_ai_utils.utils import utils_file

# Load the list of dicts (one JSON object per line) describing the texts to synthesize.
# Each entry is expected to carry at least a 'key' (output filename stem) and an 'A'
# (text to speak) field — see do_generate below. TODO confirm against the JSONL schema.
dict_list = utils_file.load_dict_list_from_jsonl("/home/node27_tmpdata/xlgeng/chat_text/gxl_all_chat_text.jsonl")
# Directory where the generated .wav files are written; created if missing.
output_wav_dir = "/home/node27_tmpdata/xlgeng/chat_text/wav"
utils_file.makedir(output_wav_dir)

# Extract all 'A' (answer text) fields and their keys, then synthesize speech per entry.
import sys
import os
def do_generate(dict_list_tmp, gpu_id):
    """Synthesize speech for a shard of entries on one GPU.

    Runs in a worker process: pins the process to ``gpu_id`` via
    CUDA_VISIBLE_DEVICES, loads a ChatTTS model, and synthesizes the
    'A' text of every dict in ``dict_list_tmp`` in batches of 5,
    writing each result to ``{output_wav_dir}/{key}.wav`` at 24 kHz.

    Args:
        dict_list_tmp: list of dicts, each with an 'A' (text) and a
            'key' (output filename stem) field.
        gpu_id: index of the GPU this worker should use.
    """
    texts = [dict_i['A'] for dict_i in dict_list_tmp]
    keys = [dict_i['key'] for dict_i in dict_list_tmp]

    # Restrict this worker process to a single GPU. This must happen
    # before CUDA is initialized in this process (it is, since each
    # worker is a fresh process).
    # torch.cuda.set_device(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    chat = ChatTTS.Chat()
    chat.load(compile=False)  # set compile=True for performance optimization if needed

    # Infer 5 texts per batch to bound memory usage.
    batch_size = 5
    for i in range(0, len(texts), batch_size):
        # Current batch of texts (the last batch may be shorter).
        texts_batch = texts[i:i + batch_size]

        # Run inference; returns one numpy waveform per input text.
        wavs = chat.infer(texts_batch)

        # Save each generated waveform under its entry's key.
        for j, wav in enumerate(wavs):
            true_index = i + j
            output_path = f"{output_wav_dir}/{keys[true_index]}.wav"
            # torchaudio.save expects a (channels, samples) tensor.
            # The original code used a bare `except:` to retry without
            # unsqueeze, which silently swallowed unrelated I/O errors;
            # instead, add the channel dimension only when it is missing.
            wav_tensor = torch.from_numpy(wav)
            if wav_tensor.dim() == 1:
                wav_tensor = wav_tensor.unsqueeze(0)
            torchaudio.save(output_path, wav_tensor, 24000)


if __name__ == "__main__":
    """"""
    dict_list_8_split = utils_file.do_split_list(dict_list, 8)
    runner = utils_file.GxlDynamicProcessPool()
    for i in range(8):
        runner.add_task(do_generate, [dict_list_8_split[i], i])
    runner.run()
