import re
import numpy as np
import sounddevice as sd
from TTS.api import TTS as CoquiTTS_API
import torch
import collections
from TTS.utils.radam import RAdam
import librosa

# Register the classes found inside Coqui TTS checkpoints so torch's
# weights_only safe loader will deserialize them.
# NOTE(review): assumes a torch version that provides add_safe_globals — confirm.
_CHECKPOINT_SAFE_GLOBALS = [RAdam, collections.defaultdict, dict]
torch.serialization.add_safe_globals(_CHECKPOINT_SAFE_GLOBALS)

def _load_tts_model(label, model_id):
    """Load a Coqui TTS model on CPU, terminating the process on failure.

    label is the human-readable name used in the progress/error messages
    (e.g. "中文模型"); model_id is the Coqui model identifier.
    """
    print(f"正在加载{label}...")
    try:
        model = CoquiTTS_API(model_id, progress_bar=False, gpu=False)
        print(f"{label}加载成功:", model.model_name)
    except Exception as e:
        print(f"{label}加载失败: {e}")
        exit(1)
    return model


# Chinese and English models, loaded up front so a failure aborts early.
tts_zh = _load_tts_model("中文模型", "tts_models/zh-CN/baker/tacotron2-DDC-GST")
tts_en = _load_tts_model("英文模型", "tts_models/en/ljspeech/vits")

# Mixed Chinese/English input text.
text = "锄禾日当午，It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent，汗滴禾下土。"

# Han ideographs plus CJK punctuation (\u3000-\u303f: ，。！… etc.) and
# fullwidth forms (\uff00-\uffef), so punctuation stays with the Chinese runs.
_ZH_CHARS = r'\u4e00-\u9fff\u3000-\u303f\uff00-\uffef'
_ZH_RUN = re.compile(f'[{_ZH_CHARS}]+|[^{_ZH_CHARS}]+')
_NON_HAN = re.compile(r'[^\u4e00-\u9fff]')


def clean_zh_text(text):
    """Strip everything except Han characters from *text*.

    Removes punctuation (and any other non-Han characters) that would
    otherwise produce noise in the Chinese synthesizer.
    """
    return _NON_HAN.sub('', text)


def split_text(text):
    """Split mixed text into a list of (segment, lang) pairs, lang in {'zh', 'en'}.

    Bug fix: the original character class treated CJK punctuation (，。)
    as "not Chinese", so fullwidth punctuation leaked into the English
    segments and was fed to the English model.  Chinese runs (including
    their punctuation) are now grouped together and cleaned down to Han
    characters; a run of pure punctuation yields an empty 'zh' segment,
    which the synthesis loop already skips.
    """
    lang_segments = []
    for seg in _ZH_RUN.findall(text):
        # Runs are homogeneous, so classifying by the first character suffices.
        if re.match(f'[{_ZH_CHARS}]', seg):
            lang_segments.append((clean_zh_text(seg), 'zh'))
        else:
            lang_segments.append((seg, 'en'))
    return lang_segments


def trim_silence(wav, sample_rate, top_db=60):
    """Return *wav* with leading and trailing silence removed.

    Delegates to librosa.effects.trim; anything quieter than top_db below
    the peak is considered silence.  sample_rate is accepted for interface
    symmetry with callers but is not needed by the trim itself.
    """
    return librosa.effects.trim(wav, top_db=top_db)[0]


# Split the mixed-language text into per-language segments.
segments = split_text(text)
print("文本分割结果:", segments)

# Synthesize each segment with the matching model.
wavs = []
# NOTE(review): both models are assumed to share this sample rate — confirm
# against the English model's config, otherwise playback speed will be wrong.
sample_rate = tts_zh.synthesizer.tts_config.audio["sample_rate"]

for seg_text, lang in segments:
    if not seg_text.strip():  # skip empty segments (e.g. punctuation-only runs)
        continue
    try:
        wav = tts_zh.tts(seg_text) if lang == 'zh' else tts_en.tts(seg_text)
        # Trim surrounding silence so segments join without long gaps.
        wavs.append(trim_silence(np.array(wav), sample_rate, top_db=60))
    except Exception as e:
        print(f"生成音频失败 ({seg_text}): {e}")
        continue

# Concatenate the segments with a short linear crossfade at each join.
if wavs:
    combined_wav = wavs[0]
    fade_samples = int(sample_rate * 0.01)  # 10 ms fade window
    for wav in wavs[1:]:
        # Bug fix: clamp the fade to the shorter clip.  The original applied
        # a fixed-length fade, which raised a broadcasting ValueError whenever
        # a trimmed clip was shorter than fade_samples.
        n = min(fade_samples, len(combined_wav), len(wav))
        if n > 0:
            combined_wav[-n:] *= np.linspace(1, 0, n)
            wav[:n] *= np.linspace(0, 1, n)
        combined_wav = np.concatenate([combined_wav, wav])

    # Save, then play the merged audio.
    tts_zh.synthesizer.save_wav(combined_wav, "output_combined.wav")
    sd.play(combined_wav, samplerate=sample_rate)
    sd.wait()
    print("中英文混合音频已生成并播放，保存为 output_combined.wav")
else:
    print("未生成任何音频片段。")