import torch
import torch_directml
from spark_tts_lib import SparkTTS
import soundfile as sf
import sounddevice as sd
import numpy as np

dml_device = torch_directml.device()
# Sanity-check the DirectML backend before loading the model.
print(f"DirectML可用性: {torch_directml.is_available()}")  # should print True
print(f"当前设备: {dml_device}")

# BUG FIX: torch_directml.device() takes an optional *integer* device index,
# not the string "cpu" -- passing "cpu" fails at runtime. Reuse the DirectML
# device created above (previously it was created but never used).
model = SparkTTS("./framework/frontend/Spark-TTS-0.5B", dml_device)

text = "宋英辰的脑子好像进水了"
# Raw string: the original mixed "\\" with a bare "\d", which only worked
# because "\d" is not a recognized escape (SyntaxWarning on Python 3.12+).
# The resulting path characters are identical.
prompt_speech_path = r"D:\myproject\git\fish_audio\fish-speech\data\test1.wav"
prompt_text = "我坐在夏天的长椅上，星星像雨一样砸到水面，粼粼波光向我涌来，带着点点的泥腥，我一时分不清我处在我人生的哪片时光，是那个干燥的夏季，顶着骄阳，在水里捞鱼的我？是圆月悬于顶，和乌云一起睡觉的我？还是那个关上门，蜷成一团的我。我记不清了，记忆不能像东西一样被丢掉，在某个不知名的画面，不特殊的时间，你将会回想起所有，把当时回避的痛苦，完完整整地再看一遍，仿佛你也就回到那时了。"
# NOTE(review): the three knobs below are currently unused -- the matching
# kwargs in model.inference() are commented out.
gender = "male"
pitch = "moderate"
speed = "moderate"
save_path = "./generated_audio.wav"

# Interactive loop: read a line of text, synthesize it with the cloned voice,
# play it back, and save the last result to save_path.
while True:
    text = input("请输入文本（或输入 'exit' 退出）：")
    # BUG FIX: the prompt promises that typing 'exit' quits, but the original
    # loop never checked the input -- the only way out was Ctrl-C.
    if text.strip().lower() == "exit":
        break
    if not text.strip():
        continue  # nothing to synthesize for empty input

    with torch.no_grad():
        wav = model.inference(
            text,
            prompt_speech_path=prompt_speech_path,
            prompt_text=prompt_text,
            # gender=gender,
            # pitch=pitch,
            # speed=speed,
        )

    # Normalize to a 1-D float32 numpy array for playback and saving.
    # (Assumes inference returns a numpy array -- .astype would fail on a
    # torch tensor; TODO confirm against SparkTTS.)
    audio_data = wav.squeeze().astype(np.float32)

    # Play on the default output device and block until playback finishes.
    sd.play(audio_data, samplerate=16000)
    sd.wait()

    # Save the same squeezed array we played: writing the raw `wav` risks a
    # (1, n) shape, which soundfile would interpret as n channels x 1 frame.
    sf.write(save_path, audio_data, samplerate=16000)