# 说话人嵌入提取
# from speechbrain.pretrained import EncoderClassifier
# import torchaudio
# import numpy as np

# # 初始化模型
# classifier = EncoderClassifier.from_hparams("model/spkrec-ecapa-voxceleb")

# # 加载并预处理音频
# signal, fs = torchaudio.load("record_out.wav")

# # 重采样到16kHz
# if fs != 16000:
#     resampler = torchaudio.transforms.Resample(fs, 16000)
#     signal = resampler(signal)

# # 多声道转单声道
# if signal.dim() > 1 and signal.shape[0] > 1:
#     signal = signal.mean(dim=0, keepdim=True)

# # 提取嵌入
# embedding = classifier.encode_batch(signal).squeeze().detach().cpu().numpy()
# np.save("my_voice_embed.npy", embedding)

# # 语音合成
import torch
from TTS.api import TTS
import numpy as np
import sounddevice as sd

from TTS.utils.radam import RAdam
# print(TTS().list_models())


def main() -> None:
    """Load a Chinese Coqui-TTS model, synthesize a sentence, and play it."""
    # torch >= 2.6 unpickles checkpoints with weights_only=True by default;
    # RAdam must be allow-listed or loading this model raises UnpicklingError.
    torch.serialization.add_safe_globals([RAdam])

    # NOTE(review): this Baker Tacotron2 model is effectively single-speaker;
    # the speaker-embedding clone path below is commented out and unused.
    tts = TTS(
        model_name="tts_models/zh-CN/baker/tacotron2-DDC-GST",
        progress_bar=True,
        gpu=False,
    )

    # The saved embedding is intentionally NOT passed to tts.tts() below —
    # it is kept only for the (disabled) voice-cloning arguments.  Guarding
    # the load fixes a crash when the file is absent, since the value is
    # never consumed either way.
    try:
        speaker_embedding = np.load("my_voice_embed.npy")  # noqa: F841 — reserved for cloning
    except FileNotFoundError:
        speaker_embedding = None

    text = "你好，我是你的智能语音助手"

    print(tts.speakers)

    # Synthesize; the disabled kwargs document the voice-cloning variant.
    wav = tts.tts(
        text=text
        # ,speaker_wav=None
        # ,speaker = "male-pt-3\n"
        # ,language = "en"
        # ,speaker_embedding=speaker_embedding
    )

    # Play at the model's native output rate rather than a hard-coded value;
    # fall back to 22050 Hz (the original assumption) if it is not exposed.
    samplerate = getattr(tts.synthesizer, "output_sample_rate", 22050)
    sd.play(wav, samplerate=samplerate)
    sd.wait()  # block until playback finishes


if __name__ == "__main__":
    main()