import sys
sys.path.append('third_party/Matcha-TTS')
from cosyvoice.cli.cosyvoice import CosyVoice, CosyVoice2
from cosyvoice.utils.file_utils import load_wav
import torchaudio
import os

from modelscope import snapshot_download


def my_snapshot_download(model_dir):
    """Resolve a model directory, downloading it from ModelScope if missing.

    Args:
        model_dir: A local filesystem path, or a ModelScope model id such as
            ``'iic/CosyVoice2-0.5B'`` to be fetched when no local path exists.

    Returns:
        The local directory holding the model: ``model_dir`` itself when it
        already exists on disk, otherwise the snapshot path reported by
        ``snapshot_download``.
    """
    if not os.path.exists(model_dir):
        # Bug fix: the original assigned the downloaded path to a local and
        # discarded it, so the function always returned None and callers could
        # never learn where the snapshot landed.
        return snapshot_download(model_dir)
    return model_dir
# Pre-fetch both required repos up front so later model loading does not
# trigger a lazy download.
for _repo_id in ('iic/CosyVoice2-0.5B', 'iic/CosyVoice-ttsfrd'):
    my_snapshot_download(_repo_id)

# cosyvoice = CosyVoice2('iic/CosyVoice2-0.5B', load_jit=False, load_trt=False, fp16=False)

# Build the CosyVoice2 pipeline.
# NOTE(review): the original (Chinese) comments claimed each GPU-dependent
# option was being turned OFF, yet every flag below is True (enabled). On a
# CPU-only machine these presumably all need to be False, as in the
# commented-out call above — confirm which configuration is intended.
cosyvoice = CosyVoice2(
    'iic/CosyVoice2-0.5B',  # model id (confirmed downloadable from ModelScope)
    load_jit=True,         # enable JIT (GPU-dependent)
    load_trt=True,         # enable TensorRT (GPU-dependent)
    load_vllm=True,        # enable vllm (GPU-dependent; must be off without a GPU)
    fp16=True              # enable FP16 (GPU-only; unsupported on CPU)
)

# NOTE if you want to reproduce the results on https://funaudiollm.github.io/cosyvoice2, please add text_frontend=False during inference
# zero_shot usage
# Zero-shot voice cloning: take the timbre from the 16 kHz prompt clip,
# synthesize the target text with it, and write one wav file per returned
# speech chunk.
prompt_speech_16k = load_wav('./asset/zero_shot_prompt.wav', 16000)
zero_shot_results = cosyvoice.inference_zero_shot(
    '收到好友从远方寄来的生日礼物，那份意外的惊喜与深深的祝福让我心中充满了甜蜜的快乐，笑容如花儿般绽放。',
    '希望你以后能够做的比我还好呦。',
    prompt_speech_16k,
    stream=False,
)
for chunk_idx, chunk in enumerate(zero_shot_results):
    torchaudio.save(f'./asset/__zero_shot_prompt{chunk_idx}.wav', chunk['tts_speech'], cosyvoice.sample_rate)
#
# # fine grained control, for supported control, check cosyvoice/tokenizer/tokenizer.py#L248
# for i, j in enumerate(cosyvoice.inference_cross_lingual('在他讲述那个荒诞故事的过程中，他突然[laughter]停下来，因为他自己也被逗笑了[laughter]。', prompt_speech_16k, stream=False)):
#     torchaudio.save('fine_grained_control_{}.wav'.format(i), j['tts_speech'], cosyvoice.sample_rate)
#
# # instruct usage
# for i, j in enumerate(cosyvoice.inference_instruct2('收到好友从远方寄来的生日礼物，那份意外的惊喜与深深的祝福让我心中充满了甜蜜的快乐，笑容如花儿般绽放。', '用四川话说这句话', prompt_speech_16k, stream=False)):
#     torchaudio.save('instruct_{}.wav'.format(i), j['tts_speech'], cosyvoice.sample_rate)