import torch
import time
from kokoro import KPipeline, KModel
import soundfile as sf

# Step 2: load the voice embedding tensor.
# Alternative voices from the checkpoint repo, kept for reference:
# voice_zf = "zf_001"
# voice_zf_tensor = torch.load(f'ckpts/kokoro-v1.1/voices/{voice_zf}.pt', weights_only=True)
# voice_af = 'af_maple'
# voice_af_tensor = torch.load(f'ckpts/kokoro-v1.1/voices/{voice_af}.pt', weights_only=True)

# weights_only=True restricts unpickling to plain tensors (no arbitrary code execution).
voice_zf_tensor = torch.load('./zf_003.pt', weights_only=True)

# Step 3: load the Kokoro model from local weights/config, on GPU when available.
repo_id = 'hexgrad/Kokoro-82M-v1.1-zh'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model_path = './kokoro-v1_1-zh.pth'
config_path = './config.json'
# .eval() switches off training-only layers (dropout etc.) for inference.
model = KModel(model=model_path, config=config_path, repo_id=repo_id).to(device).eval()


# Step 4: run inference
def speed_callable(len_ps):
    """Map a phoneme-sequence length to a playback-speed multiplier.

    Short inputs (<= 83 phonemes) keep the base speed of 1; between 84 and
    182 phonemes the base speed ramps down linearly toward 0.8; anything
    longer is held at 0.8.  The returned value is the base speed scaled by
    a constant 1.1 overall factor.
    """
    if len_ps <= 83:
        base = 1
    elif len_ps < 183:
        # Linear ramp: 1.0 at 83 phonemes, approaching 0.8 just under 183.
        base = 1 - (len_ps - 83) / 500
    else:
        base = 0.8
    return base * 1.1


# Build the Chinese ('z' lang code) pipeline around the already-loaded model.
zh_pipeline = KPipeline(lang_code='z', repo_id=repo_id, model=model)
# Alternative test sentence, kept for reference:
# sentence = '如果您愿意帮助进一步完成这一使命，请考虑为此贡献许可的音频数据。'
sentence = '这是一次伟大的尝试，我们将使用更伟大的理想'
start_time = time.time()
generator = zh_pipeline(sentence, voice=voice_zf_tensor, speed=speed_callable)
# Only the first yielded segment is consumed; a longer input text could yield more.
result = next(generator)
wav = result.audio  # presumably a 1-D waveform at 24 kHz — TODO confirm against KPipeline docs
speech_len = len(wav) / 24000  # duration in seconds, assuming 24 kHz sample rate
# RTF (real-time factor) = synthesis wall time / audio duration; < 1 is faster than real time.
print('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
sf.write('output.wav', wav, 24000)

