from kokoro import KPipeline, KModel
# Requires: kokoro, pypinyin, ordered_set, jieba, cn2an
import soundfile as sf
import os
import time
import threading
from datetime import datetime
# Local model paths. Forward slashes work on both Windows and POSIX;
# the original '.\82M\...' backslash form only worked by accident
# (\8 and \k are not escape sequences) and is non-portable.
config_path = './82M/config.json'
model_path = './82M/kokoro-v1_1-zh.pth'
voice = './82M/voices/zm_009.pt'
# Point config/model at the local files so nothing is re-downloaded from the hub.
model = KModel(config=config_path, model=model_path, repo_id="hexgrad/Kokoro-82M")
pipeline = KPipeline(lang_code='z', model=model, repo_id="hexgrad/Kokoro-82M")


def play_sound(path, remove=False):
    """Play an audio file with mplayer, optionally deleting it afterward.

    Args:
        path: Path of the audio file to play.
        remove: If True, delete the file after playback finishes.
    """
    # subprocess with an argv list avoids the shell entirely, so paths with
    # spaces or shell metacharacters are safe (os.system interpolated the
    # path unquoted into a shell command line).
    import subprocess
    try:
        subprocess.run(["mplayer", "-quiet", "-noconfig", "all", path], check=False)
    except FileNotFoundError:
        # Match os.system's best-effort behavior: a missing player is not fatal.
        print("mplayer not found; skipping playback")
    if remove:
        os.remove(path)

def speed_callable(len_ps):
    """Return the playback speed for a segment of `len_ps` phonemes.

    Short segments (<= 83 phonemes) play at full speed; mid-length ones
    (84-182) slow down linearly; anything longer uses a fixed 0.8 base.
    The result is scaled by a constant 1.1 boost.
    """
    boost = 1.1
    if len_ps <= 83:
        return 1 * boost
    if len_ps < 183:
        return (1 - (len_ps - 83) / 500) * boost
    return 0.8 * boost

# Read the source text to synthesize.
with open("t.txt", "r", encoding="utf-8") as f:
    text = f.read()

# The generator yields (graphemes, phonemes, audio) per text segment;
# the split pattern breaks on CJK/ASCII punctuation and newlines.
generator = pipeline(
    text,
    voice=voice,
    speed=speed_callable,
    split_pattern=r'[,，.。!！?？;；\n]+',
)

all_audio_segments = []

start_time = time.time()
for i, (gs, ps, audio) in enumerate(generator):
    print(f"处理第 {i+1} 段:")
    print(f"文本: {gs}")  # the text of the current segment
    print(f"音素: {ps}")  # its phoneme sequence
    all_audio_segments.append(audio)

import numpy as np

# np.concatenate raises on an empty list; fail with a clear message if the
# input produced no segments (e.g. t.txt was empty or whitespace-only).
if not all_audio_segments:
    raise SystemExit("No audio segments were generated; check t.txt")

combined_audio = np.concatenate(all_audio_segments)
os.makedirs("./output", exist_ok=True)  # create output dir if missing
current_time = datetime.now().strftime("%Y%m%d%H%M%S")
music_path = f'./output/{current_time}.mp3'
# BUG FIX: Kokoro synthesizes at 24 kHz (the per-segment writes above used
# 24000); writing at 22050 slowed the audio down and lowered its pitch.
sf.write(music_path, combined_audio, 24000)
end_time = time.time()
print(f"声音合成时间：{end_time-start_time:.4f} 秒")

# Play the result in a background thread, deleting the file after playback.
thread1 = threading.Thread(target=play_sound, args=(music_path, True))
thread1.start()
print(f"所有片段已合并并保存为{music_path}")
thread1.join()