import wave
import sys
from piper import PiperVoice
from piper.config import SynthesisConfig

# Path to the Piper ONNX voice model (resolved relative to the working directory).
tts_onnx_path = "en_US-lessac-medium.onnx"

# Synthesis configuration controlling loudness, speed, and variation.
syn_config = SynthesisConfig(
    volume=0.5,       # halve the output volume
    length_scale=2.0, # double the audio duration (i.e. half the speaking rate)
    noise_scale=1.0,  # more audio variation
    noise_w_scale=1.0, # more speaking variation (presumably phoneme-duration noise — confirm with piper docs)
    normalize_audio=False, # keep the raw model output, no normalization
)

# Load the voice model (optionally GPU-accelerated).
# To enable GPU acceleration, make sure onnxruntime-gpu is installed.
use_gpu = False  # set to True to enable GPU
voice = PiperVoice.load(tts_onnx_path, use_cuda=use_gpu)

# Example 1: synthesize straight to a WAV file using the config above.
with wave.open("test_with_config.wav", "wb") as wav_file:
    voice.synthesize_wav(
        "Welcome to the world of speech synthesis with configuration!", 
        wav_file,
        syn_config=syn_config
    )

# Example 2: streaming synthesis.
print("开始流式合成...")
text_to_synthesize = "This is a streaming synthesis example. You can process audio chunks as they are generated."

# Simulated streaming consumer.
def process_audio_chunk(chunk):
    """Log the format and size of one streamed audio chunk.

    In a real application this is where the chunk would actually be
    consumed — e.g. played on an audio device, appended to a file, or
    pushed over the network.
    """
    summary = (
        f"接收到音频块: 采样率={chunk.sample_rate}Hz, "
        f"位宽={chunk.sample_width}位, "
        f"声道数={chunk.sample_channels}, "
        f"数据大小={len(chunk.audio_int16_bytes)}字节"
    )
    print(summary)

# Stream the synthesis, handing each audio chunk to the handler as it arrives.
for audio_chunk in voice.synthesize(text_to_synthesize, syn_config=syn_config):
    process_audio_chunk(audio_chunk)

# Collect every chunk from a second streaming pass into one WAV file.
print("将流式合成结果保存到文件...")
with wave.open("test_streaming.wav", "wb") as wav_file:
    for index, chunk in enumerate(voice.synthesize(text_to_synthesize, syn_config=syn_config)):
        # The WAV header parameters come from the first chunk's format;
        # subsequent chunks are assumed to share it.
        if index == 0:
            wav_file.setframerate(chunk.sample_rate)
            wav_file.setsampwidth(chunk.sample_width)
            wav_file.setnchannels(chunk.sample_channels)
        # Append this chunk's PCM data.
        wav_file.writeframes(chunk.audio_int16_bytes)

# Final status message for the demo script.
print("处理完成!")
    
    