import asyncio

import torch
import torchaudio
from funasr import AutoModel
from transformers import AutoModelForCausalLM, AutoTokenizer

from cosyvoice.cli.cosyvoice import CosyVoice
from cosyvoice.utils.file_utils import load_wav

class CosyVoiceCli:
    """Thin wrapper around a CosyVoice SFT model for text-to-speech synthesis."""

    def __init__(self, model_dir: str = r'F:\CosyVoice-300M-SFT'):
        """Load the CosyVoice model and tokenizer.

        Args:
            model_dir: Path to the pretrained CosyVoice-300M-SFT checkpoint
                directory. Defaults to the original hard-coded location for
                backward compatibility.
        """
        # JIT compilation + fp16 enabled for faster inference; ONNX disabled.
        self.cosyvoice = CosyVoice(model_dir, load_jit=True, load_onnx=False, fp16=True)

    async def cosyvoice_inference(self, out_folder_path, text_input,
                                  spk_id: str = '中文女', sample_rate: int = 22050):
        """Synthesize `text_input` to speech and write ONE wav file at `out_folder_path`.

        Fix: the original called `torchaudio.save` with the same path on every
        iteration of the inference generator, so each segment overwrote the
        previous one and only the LAST segment survived. All segments are now
        concatenated along the time axis and saved once.

        NOTE(review): this coroutine contains no `await` — model inference and
        the file write are blocking and will stall the event loop. Consider
        `loop.run_in_executor` if used under real asyncio concurrency.

        Args:
            out_folder_path: Destination wav file path (passed straight to
                `torchaudio.save`, so it must be a file path, not a directory).
            text_input: Text to synthesize.
            spk_id: CosyVoice speaker identifier (default preserves original).
            sample_rate: Output sample rate (default preserves original 22050).
        """
        segments = [
            chunk['tts_speech']
            for chunk in self.cosyvoice.inference_sft(f'{text_input}', spk_id, stream=False)
        ]
        if not segments:
            # Nothing synthesized; avoid torch.cat on an empty list.
            return
        torchaudio.save(out_folder_path, torch.cat(segments, dim=1), sample_rate)


# async def main():
#     # 初始化CosyVoiceCli实例
#     cli = CosyVoiceCli()
#     print(cli.cosyvoice.list_avaliable_spks())
#     await cli.cosyvoice_inference('output3','你好啊，我是cosyvoice')

# asyncio.run(main())

