import sys
import pyaudio
import torch
import numpy as np
import librosa
import torchaudio
import random

sys.path.append('third_party/Matcha-TTS')
from cosyvoice.cli.cosyvoice import CosyVoice2

# Audio-processing parameters, kept identical to the official CosyVoice demo.
max_val = 0.8        # peak amplitude target used for volume normalization
TOP_DB = 60          # silence threshold (dB) for librosa trimming
HOP_LENGTH = 220     # hop length (samples) passed to librosa.effects.trim
WIN_LENGTH = 440     # frame length (samples) passed to librosa.effects.trim
SAMPLE_RATE = 24000  # CosyVoice's standard output sample rate

# Initialize the PyAudio output stream for real-time playback.
p = pyaudio.PyAudio()
stream = p.open(
    format=pyaudio.paFloat32,  # matches the float32 samples written below
    channels=1,                # mono output
    rate=SAMPLE_RATE,
    output=True,
    frames_per_buffer=1024
)

# Initialize the TTS model - parameters strictly match the official demo.
cosyvoice = CosyVoice2(
    'pretrained_models/CosyVoice2-0.5B',
    load_jit=False,
    load_trt=False,
    fp16=False
)
# Draw a random seed and fix torch's RNG with it. The seed is printed later
# so a run can be reproduced by reusing it.
# NOTE(review): the seed itself is randomized per run, so consecutive runs
# differ unless the printed seed is fed back in manually.
seed = random.randint(1, 100000)
torch.manual_seed(seed)

# 官方精确复制版音频处理功能
def official_audio_processing(audio_data, sample_rate=SAMPLE_RATE):
    """Replicate the official CosyVoice demo's audio post-processing chain.

    Steps: convert to tensor, downmix to mono, trim leading/trailing
    silence, peak-normalize, and append a 0.2 s silence pad.

    Args:
        audio_data: Waveform as np.ndarray or torch.Tensor, 1-D or
            (channels, samples) 2-D.
        sample_rate: Sample rate of ``audio_data``; used to size the
            trailing silence pad.

    Returns:
        torch.Tensor of shape (1, num_samples), float32.
    """
    # 1. Convert to a float32 PyTorch tensor.
    if isinstance(audio_data, np.ndarray):
        speech = torch.from_numpy(audio_data).float()
    else:
        speech = audio_data.float()

    # 2. Downmix multi-channel audio to a 1-D mono signal.
    if len(speech.shape) > 1:
        if speech.shape[0] > 1:
            speech = speech.mean(0, keepdim=True)
        speech = speech.squeeze(0)

    # 3. Trim silence with the exact official parameters.
    #    (librosa works on numpy, so round-trip through an ndarray.)
    trimmed, _ = librosa.effects.trim(
        speech.numpy(),
        top_db=TOP_DB,
        frame_length=WIN_LENGTH,
        hop_length=HOP_LENGTH,
    )
    speech = torch.from_numpy(trimmed).float().unsqueeze(0)

    # 4. Peak-normalize only when the signal exceeds max_val.
    max_amp = speech.abs().max()
    if max_amp > max_val:
        speech = speech / max_amp * max_val

    # 5. Append 0.2 s of trailing silence.
    # BUGFIX: previously this used the global SAMPLE_RATE, ignoring the
    # sample_rate parameter — so 16 kHz prompt audio received a pad sized
    # for 24 kHz (0.3 s instead of 0.2 s). Size it with the actual rate.
    silence_duration = int(0.2 * sample_rate)
    speech = torch.cat([speech, torch.zeros(1, silence_duration)], dim=1)

    return speech

# 加载提示音频并完全匹配官方处理
def load_and_process_prompt(file_path):
    """Load a prompt audio file and run the official processing chain.

    The zero-shot model requires a 16 kHz prompt, so the waveform is
    resampled to 16 kHz before post-processing.

    Args:
        file_path: Path to the prompt audio file.

    Returns:
        The processed prompt waveform tensor.
    """
    # torchaudio preserves the file's original precision on load.
    waveform, source_rate = torchaudio.load(file_path)

    # Resample to the 16 kHz rate the zero-shot prompt path expects.
    if source_rate != 16000:
        resampler = torchaudio.transforms.Resample(
            orig_freq=source_rate, new_freq=16000
        )
        waveform = resampler(waveform)

    # Hand off to the shared official post-processing routine.
    return official_audio_processing(waveform, sample_rate=16000)

# 流式合成与播放 (使用官方推荐参数)
def stream_and_play(text, prompt_text, prompt_audio):
    """Run zero-shot streaming synthesis and play each chunk in real time.

    Call parameters mirror the official demo exactly.

    Args:
        text: Text to synthesize.
        prompt_text: Transcript of the prompt audio.
        prompt_audio: Processed prompt waveform tensor.
    """
    chunk_iter = cosyvoice.inference_zero_shot(
        text,
        prompt_text,
        prompt_audio,
        stream=True,
        speed=1.0,  # standard speaking rate
    )

    for piece in chunk_iter:
        # Apply the official post-processing to the synthesized chunk,
        # flatten to a float32 numpy array, and push it to the speakers.
        processed = official_audio_processing(piece['tts_speech'], SAMPLE_RATE)
        samples = processed.squeeze().cpu().numpy().astype(np.float32)
        stream.write(samples.tobytes())

# Example usage - parameters match the official demo.
text = "我真的要谢谢你啊，你真是个天才！"
prompt_text = "你不要让我做饭啦，我什么都能办到，但是真的不会做饭。"

try:
    # Process the prompt audio exactly as the official demo does (crucial!).
    processed_prompt = load_and_process_prompt('asset/ying.mp3')

    # Print key parameters for debugging / reproducing this run.
    print(f"使用随机种子: {seed}")
    print(f"提示音频参数: shape={processed_prompt.shape}, min={processed_prompt.min():.4f}, max={processed_prompt.max():.4f}")

    # Start streaming synthesis and playback.
    stream_and_play(text, prompt_text, processed_prompt)
finally:
    # Release audio resources even if loading or synthesis raises;
    # previously an exception would leak the PyAudio stream/handle.
    stream.stop_stream()
    stream.close()
    p.terminate()