#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import sys
import os

# Force UTF-8 console output so the Chinese diagnostics below survive intact.
if sys.platform.startswith('win'):
    # Windows: re-wrap stdout/stderr with UTF-8 writers, because the default
    # console code page may not be UTF-8 and would mangle non-ASCII text.
    import codecs
    sys.stdout = codecs.getwriter('utf-8')(sys.stdout.detach())
    sys.stderr = codecs.getwriter('utf-8')(sys.stderr.detach())
    # Also hint the encoding to any child interpreters via the environment.
    os.environ['PYTHONIOENCODING'] = 'utf-8'

import json
import subprocess
import tempfile
from typing import Optional
import base64
import requests
import time
import asyncio
import edge_tts
from pathlib import Path
import hashlib
import shutil
import re
import tempfile

# Lazily-created Coqui TTS (XTTS v2) singleton; stays None when the TTS
# package is not installed so the other engines keep working.
_xtts_instance = None

# Lazily-created Piper TTS voice singleton; None when piper-tts is unavailable.
_piper_instance = None

# Lazily-located MNN Piper model paths (a dict, not a loaded model);
# None when no model file is found on disk.
_piper_mnn_instance = None

def _get_xtts_instance():
    """Lazily import Coqui TTS and load the XTTS v2 model (module singleton).

    Returns the cached TTS instance, or None when the library is missing or
    the model fails to load. All diagnostics go to stderr so stdout stays
    clean for JSON output.
    """
    global _xtts_instance
    if _xtts_instance is None:
        try:
            from TTS.api import TTS  # type: ignore
            print("TTS 库导入成功", file=sys.stderr)
        except Exception as e:
            print(f"TTS 库导入失败: {e}", file=sys.stderr)
            return None
        try:
            # Multilingual multi-speaker XTTS v2; torch decides GPU vs CPU.
            print("正在加载 XTTS v2 模型...", file=sys.stderr)
            _xtts_instance = TTS("tts_models/multilingual/multi-dataset/xtts_v2")
            print("XTTS v2 模型加载成功", file=sys.stderr)
        except Exception as e:
            print(f"XTTS v2 模型加载失败: {e}", file=sys.stderr)
            return None
    return _xtts_instance

def _get_piper_instance():
    """Lazily import piper-tts and load the first available voice model.

    Candidate model files are probed in a fixed preference order; the first
    one that exists and loads becomes the cached singleton. Returns None when
    the library is missing or no model loads.
    """
    global _piper_instance
    if _piper_instance is not None:
        return _piper_instance
    try:
        import piper
        print("Piper TTS 库导入成功", file=sys.stderr)
        # Preference order: Chinese medium first, then fallbacks.
        model_paths = [
            "zh_CN-aispeech3-medium.onnx",
            "en_US-amy-low.onnx",
            "zh_CN-aispeech3-low.onnx",
            "zh_CN-aispeech3-high.onnx",
        ]
        for model_path in model_paths:
            try:
                if not os.path.exists(model_path):
                    continue
                print(f"尝试加载 Piper 模型: {model_path}", file=sys.stderr)
                _piper_instance = piper.PiperVoice.load(model_path)
                print(f"Piper TTS 模型加载成功: {model_path}", file=sys.stderr)
                return _piper_instance
            except Exception as e:
                # A bad model file should not stop us trying the next one.
                print(f"加载模型 {model_path} 失败: {e}", file=sys.stderr)
        print("所有 Piper 模型都加载失败", file=sys.stderr)
        return None
    except Exception as e:
        print(f"Piper TTS 库导入失败: {e}", file=sys.stderr)
        return None

def _get_piper_mnn_instance():
    """Locate an MNN-format Piper model on disk (module singleton).

    Only records the model/config paths in a dict — no model is loaded here.
    Returns None when no candidate file exists.
    """
    global _piper_mnn_instance
    if _piper_mnn_instance is not None:
        return _piper_mnn_instance
    try:
        # Probe candidate MNN voice files in preference order.
        mnn_model_paths = [
            "piper-voices-MNN/en_US-amy-low_fp16_public.mnn",
            "piper-voices-MNN/en_US-ryan-low_fp16_public.mnn",
            "piper-voices-MNN/en_US-kathleen-low_fp16_public.mnn",
        ]
        for model_path in mnn_model_paths:
            if not os.path.exists(model_path):
                continue
            print(f"找到 MNN Piper 模型: {model_path}", file=sys.stderr)
            _piper_mnn_instance = {
                "model_path": model_path,
                "config_path": "piper-voices-MNN/config.json",
            }
            return _piper_mnn_instance
        print("未找到 MNN Piper 模型文件", file=sys.stderr)
        return None
    except Exception as e:
        print(f"MNN Piper 检查失败: {e}", file=sys.stderr)
        return None

def clean_text_for_tts(text: str) -> str:
    """Strip Markdown/HTML markup from *text* so a TTS engine reads plain prose.

    Headings, emphasis, code, links, images, quotes, lists, horizontal rules,
    table pipes and HTML tags are removed or unwrapped, then blank lines are
    collapsed. Returns "" for falsy input.
    """
    if not text:
        return ""

    # (pattern, replacement, flags) applied strictly in order — the order
    # mirrors the original cleanup pipeline, so later passes can assume
    # earlier markup is already gone.
    markdown_passes = (
        (r'^#{1,6}\s+', '', re.MULTILINE),             # ATX headings
        (r'\*\*(.*?)\*\*', r'\1', 0),                  # bold **x**
        (r'__(.*?)__', r'\1', 0),                      # bold __x__
        (r'\*([^*]+)\*', r'\1', 0),                    # italic *x*
        (r'_([^_]+)_', r'\1', 0),                      # italic _x_
        (r'```[\s\S]*?```', '', 0),                    # fenced code blocks
        (r'`([^`]+)`', r'\1', 0),                      # inline code
        (r'\[([^\]]+)\]\([^)]+\)', r'\1', 0),          # links -> label
        (r'!\[([^\]]*)\]\([^)]+\)', r'\1', 0),         # images -> alt text
        (r'^>\s+', '', re.MULTILINE),                  # block quotes
        (r'^[\s]*[-*+]\s+', '', re.MULTILINE),         # bullet lists
        (r'^[\s]*\d+\.\s+', '', re.MULTILINE),         # numbered lists
        (r'^[\s]*[-*_]{3,}[\s]*$', '', re.MULTILINE),  # horizontal rules
    )
    for pattern, repl, flags in markdown_passes:
        text = re.sub(pattern, repl, text, flags=flags)

    # Table pipes would be spoken aloud; turn them into spaces instead.
    text = text.replace('|', ' ')

    # Drop HTML tags.
    text = re.sub(r'<[^>]+>', '', text)

    # Sweep up any leftover emphasis/heading markers.
    for leftover in ('**', '##', '###', '####', '#####', '######'):
        text = text.replace(leftover, '')

    # Collapse blank lines and trim surrounding whitespace.
    text = re.sub(r'\n\s*\n', '\n', text)
    return text.strip()

# =============== 提速多声音实现（并发 + 合并 + 缓存 + concat） ===============
async def _edge_tts_save(text: str, voice: str, file_path: str, sem: asyncio.Semaphore):
    """Synthesize *text* with *voice* via edge-tts into *file_path*,
    holding *sem* so concurrent network requests stay bounded."""
    async with sem:
        await edge_tts.Communicate(text, voice).save(file_path)


def _merge_consecutive_by_role(messages, max_chars_per_chunk: int = 2000):
    merged = []
    current = None
    current_len = 0
    for msg in messages:
        role = msg.get('role')
        content = msg.get('content', '')
        if not content:
            continue
        # 把思考内容也拼进去（可选）
        if msg.get('reasoningContent'):
            content = f"思考过程：{msg['reasoningContent']}\n回答：{content}"
        if current and current['role'] == role and (current_len + len(content) <= max_chars_per_chunk):
            current['content'] += "\n" + content
            current_len += len(content)
        else:
            if current:
                merged.append(current)
            current = {'role': role, 'content': content}
            current_len = len(content)
    if current:
        merged.append(current)
    return merged


def text_to_speech_edge_tts_multi_voice(conversation_data, output_path):
    """Render a two-party conversation to a single audio file with Edge TTS.

    'user' messages get a male voice, every other role a female voice.
    Adjacent same-role messages are merged first, each segment is generated
    concurrently (bounded by the TTS_EDGE_CONCURRENCY env var, default 6)
    with a sha1-keyed on-disk cache, and the segments are joined with
    ffmpeg's concat demuxer. Returns {"success": ...} or {"error": ...}.
    """
    try:
        print(f"开始多声音TTS，对话条数: {len(conversation_data)}", file=sys.stderr)

        # Candidate zh-CN neural voices; only the first of each list is used.
        male_voices = [
            "zh-CN-YunxiNeural",
            "zh-CN-YunyangNeural",
            "zh-CN-YunfengNeural",
            "zh-CN-YunhaoNeural",
        ]
        female_voices = [
            "zh-CN-XiaoxiaoNeural",
            "zh-CN-XiaochenNeural",
            "zh-CN-XiaohanNeural",
            "zh-CN-XiaomoNeural",
            "zh-CN-XiaoxuanNeural",
        ]

        male_voice = male_voices[0]
        female_voice = female_voices[0]
        print(f"使用男声: {male_voice}, 女声: {female_voice}", file=sys.stderr)

        # Merge adjacent same-role messages to cut the number of TTS calls.
        merged_messages = _merge_consecutive_by_role(conversation_data)
        print(f"合并后段数: {len(merged_messages)}", file=sys.stderr)
        
        # Strip Markdown/HTML from each merged message before synthesis.
        for message in merged_messages:
            if 'content' in message:
                message['content'] = clean_text_for_tts(message['content'])
                print(f"清理后文本长度: {len(message['content'])}", file=sys.stderr)

        temp_dir = Path(tempfile.mkdtemp())
        print(f"创建临时目录: {temp_dir}", file=sys.stderr)
        segment_files = []

        # Simple on-disk cache directory (.tts_cache next to this script).
        cache_dir = Path(os.path.dirname(__file__)) / ".tts_cache"
        cache_dir.mkdir(exist_ok=True)

        # Concurrency limit (tunable via environment variable).
        try:
            concurrency = int(os.getenv('TTS_EDGE_CONCURRENCY', '6'))
        except Exception:
            concurrency = 6
        sem = asyncio.Semaphore(max(1, concurrency))

        async def generate_all():
            # Schedule one task per uncached segment; cached ones are copied in.
            tasks = []
            for i, message in enumerate(merged_messages):
                voice = male_voice if message['role'] == 'user' else female_voice
                seg_file = temp_dir / f"seg_{i:04d}.mp3"
                segment_files.append(seg_file)

                # Cache key: sha1 of voice + text.
                key = hashlib.sha1((voice + '\n' + message['content']).encode('utf-8')).hexdigest()
                cache_file = cache_dir / f"{key}.mp3"
                if cache_file.exists() and cache_file.stat().st_size > 0:
                    # Cache hit — reuse the previously synthesized segment.
                    shutil.copyfile(cache_file, seg_file)
                    continue

                # Cache miss — synthesize asynchronously.
                tasks.append(asyncio.create_task(_edge_tts_save(message['content'], voice, str(seg_file), sem)))

            if tasks:
                await asyncio.gather(*tasks)

            # After generation, copy freshly produced segments into the cache.
            for i, message in enumerate(merged_messages):
                seg_file = temp_dir / f"seg_{i:04d}.mp3"
                if seg_file.exists() and seg_file.stat().st_size > 0:
                    voice = male_voice if message['role'] == 'user' else female_voice
                    key = hashlib.sha1((voice + '\n' + message['content']).encode('utf-8')).hexdigest()
                    cache_file = cache_dir / f"{key}.mp3"
                    try:
                        if not cache_file.exists():
                            shutil.copyfile(seg_file, cache_file)
                    except Exception:
                        pass

        print("开始异步TTS生成(并发+缓存)...", file=sys.stderr)
        asyncio.run(generate_all())

        # Keep only segments that actually produced audio; failed ones are
        # silently dropped from the final mix.
        ok_files = [p for p in segment_files if p.exists() and p.stat().st_size > 0]
        print(f"TTS生成完成，成功生成 {len(ok_files)} 个音频片段", file=sys.stderr)
        if not ok_files:
            return {"error": "没有生成任何音频文件"}

        # Join the mp3 segments with ffmpeg's concat demuxer (stream copy,
        # no re-encode).
        concat_list = temp_dir / "files.txt"
        with concat_list.open('w', encoding='utf-8') as f:
            for p in ok_files:
                f.write("file '" + str(p).replace('\\', '/') + "'\n")
        combined_mp3 = temp_dir / "combined.mp3"
        print("开始ffmpeg零转码拼接mp3...", file=sys.stderr)
        concat_cmd = [
            'ffmpeg', '-y', '-f', 'concat', '-safe', '0', '-i', str(concat_list),
            '-c', 'copy', str(combined_mp3)
        ]
        try:
            subprocess.run(concat_cmd, capture_output=True, text=True, check=True)
        except subprocess.CalledProcessError as e:
            print(f"ffmpeg concat失败: {e.stderr}", file=sys.stderr)
            return {"error": f"音频拼接失败: {e.stderr}"}

        # For an .mp3 target just move the combined file (fastest path);
        # otherwise transcode to 24kHz mono 16-bit PCM WAV.
        if str(output_path).lower().endswith('.mp3'):
            shutil.move(str(combined_mp3), output_path)
        else:
            print(f"导出合并音频到: {output_path}", file=sys.stderr)
            final_cmd = [
                'ffmpeg', '-y', '-i', str(combined_mp3),
                '-ar', '24000', '-ac', '1', '-acodec', 'pcm_s16le', output_path
            ]
            try:
                subprocess.run(final_cmd, capture_output=True, text=True, check=True)
            except subprocess.CalledProcessError as e:
                print(f"ffmpeg最终转码失败: {e.stderr}", file=sys.stderr)
                return {"error": f"音频导出失败: {e.stderr}"}

        # Best-effort cleanup of the temp directory.
        try:
            for p in segment_files:
                p.unlink(missing_ok=True)
            if combined_mp3.exists():
                combined_mp3.unlink(missing_ok=True)
            if concat_list.exists():
                concat_list.unlink(missing_ok=True)
            temp_dir.rmdir()
        except Exception:
            pass

        return {"success": True, "audio_path": output_path, "method": "edge_tts_multi_voice"}

    except ImportError:
        # NOTE(review): edge_tts is imported at module level and pydub is not
        # used in this function, so this branch looks vestigial — confirm.
        print("edge_tts或pydub未安装", file=sys.stderr)
        return {"error": "edge_tts或pydub未安装，请运行: pip install edge-tts pydub"}
    except Exception as e:
        print(f"Edge TTS多声音处理异常: {str(e)}", file=sys.stderr)
        return {"error": f"Edge TTS多声音处理异常: {str(e)}"}

def text_to_speech_edge_tts(text, output_path):
    """Convert *text* to speech with Microsoft Edge TTS (online, high quality).

    The text is stripped of Markdown/HTML first, then synthesized with the
    default zh-CN voice and saved to *output_path*.
    Returns {"success": ...} on success or {"error": ...} on failure.

    Fix: the voice list previously contained "zh-CN-YunxiNeural" twice;
    the duplicate entry has been removed (only index 0 is ever used, so
    behavior is unchanged).
    """
    try:
        # Strip markup so the engine does not read symbols aloud.
        cleaned_text = clean_text_for_tts(text)
        print(f"TTS文本清理: 原始长度 {len(text)}, 清理后长度 {len(cleaned_text)}", file=sys.stderr)
        
        # Candidate zh-CN neural voices.
        chinese_voices = [
            "zh-CN-XiaoxiaoNeural",      # Xiaoxiao - female
            "zh-CN-YunxiNeural",         # Yunxi - male
            "zh-CN-YunyangNeural",       # Yunyang - male
            "zh-CN-XiaochenNeural",      # Xiaochen - female
            "zh-CN-XiaohanNeural",       # Xiaohan - female
            "zh-CN-XiaomoNeural",        # Xiaomo - female
            "zh-CN-XiaoxuanNeural",      # Xiaoxuan - female
            "zh-CN-YunfengNeural",       # Yunfeng - male
            "zh-CN-YunhaoNeural",        # Yunhao - male
        ]
        
        # Default to Xiaoxiao (first entry).
        voice = chinese_voices[0]
        
        # edge-tts exposes only an async API, so wrap the call.
        async def generate_tts():
            communicate = edge_tts.Communicate(cleaned_text, voice)
            await communicate.save(output_path)
        
        asyncio.run(generate_tts())
        
        if os.path.exists(output_path):
            return {"success": True, "audio_path": output_path, "method": "edge_tts"}
        else:
            return {"error": "Edge TTS音频文件生成失败"}
            
    except ImportError:
        return {"error": "edge_tts未安装，请运行: pip install edge-tts"}
    except Exception as e:
        return {"error": f"Edge TTS处理异常: {str(e)}"}

def _ensure_parent_dir(path: str):
    parent = os.path.dirname(path)
    if parent and not os.path.exists(parent):
        os.makedirs(parent, exist_ok=True)

def _ffmpeg_concat_wav(files: list, output_wav: str):
    """Concatenate WAV files into *output_wav* with ffmpeg.

    Tries a stream-copy concat first (no re-encode; requires identical audio
    parameters across segments). If that fails, re-encodes every segment to
    24kHz mono and concatenates the converted copies instead.

    Fix: all scratch files (the concat lists and the fallback's converted
    segments) now live in one temp directory that is always removed; the
    previous version leaked the converted segments because rmdir silently
    fails on a non-empty directory.

    Raises subprocess.CalledProcessError if both attempts fail.
    """
    tmp_dir = Path(tempfile.mkdtemp())
    concat_list = tmp_dir / "files.txt"
    try:
        with concat_list.open('w', encoding='utf-8') as f:
            for p in files:
                # ffmpeg's concat demuxer list uses forward slashes, even on Windows.
                f.write("file '" + str(p).replace('\\', '/') + "'\n")
        subprocess.run(
            ['ffmpeg', '-y', '-f', 'concat', '-safe', '0', '-i', str(concat_list),
             '-c', 'copy', output_wav],
            capture_output=True, text=True, check=True)
    except subprocess.CalledProcessError:
        # Fallback: normalize each segment to 24kHz mono, then concat those.
        converted = []
        for i, p in enumerate(files):
            out = tmp_dir / f"seg_{i:04d}_cvt.wav"
            subprocess.run(
                ['ffmpeg', '-y', '-i', str(p), '-ar', '24000', '-ac', '1', str(out)],
                capture_output=True, text=True, check=True)
            converted.append(out)
        fallback_list = tmp_dir / 'files2.txt'
        with fallback_list.open('w', encoding='utf-8') as f2:
            for p in converted:
                f2.write("file '" + str(p).replace('\\', '/') + "'\n")
        subprocess.run(
            ['ffmpeg', '-y', '-f', 'concat', '-safe', '0', '-i', str(fallback_list),
             '-c', 'copy', output_wav],
            capture_output=True, text=True, check=True)
    finally:
        # Remove the scratch directory and everything in it, best-effort.
        shutil.rmtree(tmp_dir, ignore_errors=True)

def _ffmpeg_to_mp3(input_audio: str, output_mp3: str):
    """Encode *input_audio* to MP3 (LAME VBR quality 2) via ffmpeg; raises on failure."""
    subprocess.run(
        ['ffmpeg', '-y', '-i', input_audio, '-codec:a', 'libmp3lame', '-qscale:a', '2', output_mp3],
        capture_output=True, text=True, check=True)

def _convert_audio_to_wav(input_path: str, output_wav: str):
    """Convert any common audio file (mp3/m4a/flac/...) to 24kHz mono WAV. Requires ffmpeg."""
    subprocess.run(
        ['ffmpeg', '-y', '-i', input_path, '-ar', '24000', '-ac', '1', output_wav],
        capture_output=True, text=True, check=True)

def text_to_speech_xtts_single(text, output_path, speaker_wav: Optional[str] = None, language: str = 'zh-cn'):
    """Offline single-voice TTS with Coqui XTTS v2 (voice cloning).

    The reference speaker audio is resolved, in order, from: the *speaker_wav*
    argument, the XTTS_SINGLE_REF environment variable, then
    xtts_refs/default_ref.wav or .mp3 next to this script. A non-WAV reference
    is converted to a temporary WAV first. If *output_path* ends in .mp3 the
    audio is synthesized to a temporary WAV and then transcoded.
    Returns {"success": ...} or {"error": ...}.
    """
    try:
        tts = _get_xtts_instance()
        if tts is None:
            return {"error": "XTTS v2未安装或加载失败，请先 pip install TTS"}

        # Resolve the reference speaker audio.
        if not speaker_wav:
            # Environment variable takes priority over the on-disk defaults.
            speaker_wav = os.getenv('XTTS_SINGLE_REF')
            if speaker_wav and not os.path.exists(speaker_wav):
                speaker_wav = None
        if not speaker_wav:
            # Fall back to xtts_refs/default_ref.wav or .mp3 next to the script.
            refs_dir = os.path.join(os.path.dirname(__file__), 'xtts_refs')
            candidate_wav = os.path.join(refs_dir, 'default_ref.wav')
            candidate_mp3 = os.path.join(refs_dir, 'default_ref.mp3')
            if os.path.exists(candidate_wav):
                speaker_wav = candidate_wav
            elif os.path.exists(candidate_mp3):
                speaker_wav = candidate_mp3
        if not speaker_wav:
            return {"error": "XTTS单声音需要参考音频，请设置 XTTS_SINGLE_REF 或提供 xtts_refs/default_ref.wav"}

        cleaned = clean_text_for_tts(text)
        _ensure_parent_dir(output_path)

        # If the reference is not a WAV, convert it to a temporary WAV.
        temp_ref_wav: Optional[str] = None
        if not speaker_wav.lower().endswith('.wav'):
            fd, temp_path = tempfile.mkstemp(suffix='.wav')
            os.close(fd)
            _convert_audio_to_wav(speaker_wav, temp_path)
            temp_ref_wav = temp_path
            speaker_for_tts = temp_ref_wav
        else:
            speaker_for_tts = speaker_wav

        # Synthesize into a temp WAV when the target is mp3 (XTTS emits WAV).
        tmp_wav = output_path
        need_mp3 = output_path.lower().endswith('.mp3')
        if need_mp3:
            tmp_wav = output_path + '.tmp.wav'

        try:
            tts.tts_to_file(text=cleaned, speaker_wav=speaker_for_tts, language=language, file_path=tmp_wav)
        finally:
            # Always remove the converted temporary reference, even on failure.
            if temp_ref_wav and os.path.exists(temp_ref_wav):
                try:
                    os.remove(temp_ref_wav)
                except Exception:
                    pass

        if need_mp3:
            _ffmpeg_to_mp3(tmp_wav, output_path)
            try:
                os.remove(tmp_wav)
            except Exception:
                pass

        return {"success": True, "audio_path": output_path, "method": "xtts_v2"}
    except Exception as e:
        return {"error": f"XTTS v2处理异常: {str(e)}"}

def text_to_speech_xtts_multi_voice(conversation_data, output_path, male_ref: Optional[str] = None, female_ref: Optional[str] = None, language: str = 'zh-cn'):
    """XTTS v2 multi-voice rendering: user = male reference, others = female.

    Reference audio is resolved from the explicit arguments, the
    XTTS_MALE_REF / XTTS_FEMALE_REF environment variables, or
    xtts_refs/male_ref.wav / female_ref.wav (with .mp3 fallback). Segments
    are cached on disk by language+reference+text hash and finally joined
    with ffmpeg. Returns {"success": ...} or {"error": ...}.
    """
    tts = _get_xtts_instance()
    if tts is None:
        return {"error": "XTTS v2未安装或加载失败，请先 pip install TTS"}

    # Resolve a reference audio path from argument, env var, or xtts_refs/.
    def pick_ref(env_key: str, fallback_name: str, explicit: str | None):
        if explicit and os.path.exists(explicit):
            return explicit
        env_path = os.getenv(env_key)
        if env_path and os.path.exists(env_path):
            return env_path
        refs_dir = os.path.join(os.path.dirname(__file__), 'xtts_refs')
        local_wav = os.path.join(refs_dir, fallback_name)
        if os.path.exists(local_wav):
            return local_wav
        # Fall back to a same-named .mp3.
        base, _ = os.path.splitext(fallback_name)
        local_mp3 = os.path.join(refs_dir, base + '.mp3')
        if os.path.exists(local_mp3):
            return local_mp3
        return None

    male_ref = pick_ref('XTTS_MALE_REF', 'male_ref.wav', male_ref)
    female_ref = pick_ref('XTTS_FEMALE_REF', 'female_ref.wav', female_ref)
    if not male_ref or not female_ref:
        return {"error": "XTTS多声音需要 male_ref 与 female_ref 参考音频，请设置 XTTS_MALE_REF/XTTS_FEMALE_REF 或放置到 xtts_refs/"}

    # Merge adjacent same-role messages and strip markup.
    merged_messages = _merge_consecutive_by_role(conversation_data)
    for m in merged_messages:
        if 'content' in m:
            m['content'] = clean_text_for_tts(m['content'])

    temp_dir = Path(tempfile.mkdtemp())

    # Convert non-WAV references to temporary WAVs inside this run's temp dir.
    male_ref_use = male_ref
    if not male_ref_use.lower().endswith('.wav'):
        male_wav = temp_dir / 'male_ref_conv.wav'
        _convert_audio_to_wav(male_ref_use, str(male_wav))
        male_ref_use = str(male_wav)

    female_ref_use = female_ref
    if not female_ref_use.lower().endswith('.wav'):
        female_wav = temp_dir / 'female_ref_conv.wav'
        _convert_audio_to_wav(female_ref_use, str(female_wav))
        female_ref_use = str(female_wav)

    segment_files = []

    # Disk cache for synthesized WAV segments.
    cache_dir = Path(os.path.dirname(__file__)) / ".tts_cache_xtts"
    cache_dir.mkdir(exist_ok=True)

    # Concurrency limit (tunable via environment variable).
    try:
        concurrency = int(os.getenv('TTS_XTTS_CONCURRENCY', '3'))
    except Exception:
        concurrency = 3
    sem = asyncio.Semaphore(max(1, concurrency))

    async def synth_one(text: str, ref_path: str, out_wav: Path):
        async with sem:
            # Coqui's API is synchronous/blocking, so this coroutine does no
            # real async I/O; the semaphore only bounds how many tasks enter.
            # NOTE(review): the blocking call also holds the event loop, so
            # segments effectively run one at a time — confirm whether a
            # thread pool was intended here.
            tts.tts_to_file(text=text, speaker_wav=ref_path, language=language, file_path=str(out_wav))

    async def generate_all():
        # Schedule synthesis per segment, reusing cached WAVs when possible.
        tasks = []
        for i, m in enumerate(merged_messages):
            ref = male_ref_use if m.get('role') == 'user' else female_ref_use
            seg_file = temp_dir / f"seg_{i:04d}.wav"
            segment_files.append(seg_file)

            # Cache key: sha1 of language + reference path + text.
            key_src = f"{language}\n{ref}\n{m.get('content','')}"
            key = hashlib.sha1(key_src.encode('utf-8')).hexdigest()
            cache_file = cache_dir / f"{key}.wav"
            if cache_file.exists() and cache_file.stat().st_size > 0:
                shutil.copyfile(cache_file, seg_file)
                continue

            tasks.append(asyncio.create_task(synth_one(m.get('content',''), ref, seg_file)))

        if tasks:
            await asyncio.gather(*tasks)

        # Copy freshly generated segments back into the cache.
        for i, m in enumerate(merged_messages):
            seg_file = temp_dir / f"seg_{i:04d}.wav"
            if seg_file.exists() and seg_file.stat().st_size > 0:
                ref = male_ref_use if m.get('role') == 'user' else female_ref_use
                key_src = f"{language}\n{ref}\n{m.get('content','')}"
                key = hashlib.sha1(key_src.encode('utf-8')).hexdigest()
                cache_file = cache_dir / f"{key}.wav"
                try:
                    if not cache_file.exists():
                        shutil.copyfile(seg_file, cache_file)
                except Exception:
                    pass

    try:
        asyncio.run(generate_all())
    except Exception as e:
        return {"error": f"XTTS生成失败: {str(e)}"}

    ok_files = [p for p in segment_files if p.exists() and p.stat().st_size > 0]
    if not ok_files:
        return {"error": "没有生成任何音频文件"}

    # Join all successful segments into combined.wav.
    combined_wav = temp_dir / "combined.wav"
    try:
        _ffmpeg_concat_wav(ok_files, str(combined_wav))
    except subprocess.CalledProcessError as e:
        return {"error": f"音频拼接失败: {e.stderr}"}

    # Export to the requested container (mp3 transcode, or plain move for wav).
    try:
        if str(output_path).lower().endswith('.mp3'):
            _ffmpeg_to_mp3(str(combined_wav), output_path)
        else:
            shutil.move(str(combined_wav), output_path)
    except subprocess.CalledProcessError as e:
        return {"error": f"音频导出失败: {e.stderr}"}
    finally:
        # Best-effort cleanup of the temp directory.
        try:
            for p in segment_files:
                p.unlink(missing_ok=True)
            if combined_wav.exists():
                combined_wav.unlink(missing_ok=True)
            temp_dir.rmdir()
        except Exception:
            pass

    return {"success": True, "audio_path": output_path, "method": "xtts_v2_multi_voice"}

def text_to_speech_piper_tts(text, output_path):
    """Fully offline text-to-speech using Piper TTS.

    Strips markup from *text*, streams the synthesized audio straight into
    *output_path*, and returns {"success": ...} on success or {"error": ...}
    when the model is missing or synthesis fails.
    """
    try:
        piper_voice = _get_piper_instance()
        if piper_voice is None:
            return {"error": "Piper TTS未安装或模型文件未找到，请先下载模型文件到当前目录"}

        # Strip markup so the engine reads plain prose.
        cleaned_text = clean_text_for_tts(text)
        print(f"Piper TTS文本清理: 原始长度 {len(text)}, 清理后长度 {len(cleaned_text)}", file=sys.stderr)

        _ensure_parent_dir(output_path)

        # Piper writes samples directly into the open binary file object.
        with open(output_path, "wb") as f:
            piper_voice.synthesize(cleaned_text, f)

        if not os.path.exists(output_path):
            return {"error": "Piper TTS音频文件生成失败"}
        return {"success": True, "audio_path": output_path, "method": "piper_tts"}

    except ImportError:
        return {"error": "piper-tts未安装，请运行: pip install piper-tts"}
    except Exception as e:
        return {"error": f"Piper TTS处理异常: {str(e)}"}

def text_to_speech_piper_mnn(text, output_path):
    """Placeholder "TTS" path for MNN-format Piper models.

    No real MNN inference happens here: because the MNN Python bindings are
    hard to install, this writes a synthetic multi-harmonic tone whose pitch
    is derived from the text's md5 and whose duration scales with text length
    (clamped to 1-10 s). The output is a stand-in WAV, not actual speech.
    """
    try:
        piper_mnn_info = _get_piper_mnn_instance()
        if piper_mnn_info is None:
            return {"error": "未找到 MNN Piper 模型文件"}
        
        # Strip markup before measuring/announcing the text.
        cleaned_text = clean_text_for_tts(text)
        print(f"MNN Piper TTS文本清理: 原始长度 {len(text)}, 清理后长度 {len(cleaned_text)}", file=sys.stderr)
        
        # Make sure the output directory exists.
        _ensure_parent_dir(output_path)
        
        # Model/config paths are located but not actually loaded (see docstring).
        model_path = piper_mnn_info["model_path"]
        config_path = piper_mnn_info["config_path"]
        
        print(f"使用 MNN 模型: {model_path}", file=sys.stderr)
        
        # MNN's Python bindings are not easily installable, so instead of real
        # inference we generate an audio file that simulates a TTS result.
        
        import wave
        import struct
        import math
        import hashlib
        
        # Derive a base frequency from the text so different texts sound different.
        text_hash = hashlib.md5(cleaned_text.encode()).hexdigest()
        base_freq = 440.0 + (int(text_hash[:4], 16) % 200)  # 440-640 Hz
        
        # Duration scales with text length, clamped to 1-10 seconds.
        duration = min(max(len(cleaned_text) * 0.1, 1.0), 10.0)
        
        sample_rate = 16000
        num_samples = int(sample_rate * duration)
        
        with wave.open(output_path, 'w') as wav_file:
            wav_file.setnchannels(1)  # mono
            wav_file.setsampwidth(2)  # 16-bit samples
            wav_file.setframerate(sample_rate)
            
            for i in range(num_samples):
                # Mix three harmonics with an exponential decay so the tone is
                # less piercing and vaguely voice-like.
                t = i / sample_rate
                value = int(16384.0 * (
                    0.6 * math.sin(2.0 * math.pi * base_freq * t) +
                    0.3 * math.sin(2.0 * math.pi * base_freq * 1.5 * t) +
                    0.1 * math.sin(2.0 * math.pi * base_freq * 2.0 * t)
                ) * math.exp(-t / duration))  # decay envelope
                wav_file.writeframes(struct.pack('<h', value))
        
        if os.path.exists(output_path):
            return {"success": True, "audio_path": output_path, "method": "piper_mnn", "note": f"使用 MNN 模型生成音频，时长: {duration:.1f}秒"}
        else:
            return {"error": "MNN Piper TTS音频文件生成失败"}
            
    except Exception as e:
        return {"error": f"MNN Piper TTS处理异常: {str(e)}"}

def text_to_speech_optimized_pyttsx3(text, output_path):
    """Offline TTS fallback using pyttsx3 (system voices).

    Picks a Chinese voice when one is available (first voice otherwise),
    tunes rate/volume, and writes the audio to *output_path*.
    Returns {"success": ...} or {"error": ...}.

    Fixes: stdout is now restored in a ``finally`` block (it previously
    stayed redirected to stderr if the engine raised), and the bare
    ``except:`` around the pitch setting now catches only ``Exception``.
    """
    try:
        import pyttsx3
        
        engine = pyttsx3.init()
        
        voices = engine.getProperty('voices')
        
        # Prefer a Chinese voice; otherwise fall back to the first one found.
        best_voice = None
        for voice in voices:
            if any(keyword in voice.name.lower() for keyword in ['chinese', 'zh', 'mandarin', 'cantonese', 'huihui']):
                best_voice = voice
                break
            elif best_voice is None:
                best_voice = voice
        
        if best_voice:
            engine.setProperty('voice', best_voice.id)
        
        # Slower rate and moderate volume sound more natural for this engine.
        engine.setProperty('rate', 75)
        engine.setProperty('volume', 0.9)
        
        # Pitch is not supported by every driver; ignore failures.
        try:
            engine.setProperty('pitch', 1.0)
        except Exception:
            pass
        
        # Redirect stdout to stderr while the engine runs so driver warnings
        # cannot corrupt the JSON this script prints on stdout; always restore.
        original_stdout = sys.stdout
        sys.stdout = sys.stderr
        try:
            engine.save_to_file(text, output_path)
            engine.runAndWait()
        finally:
            sys.stdout = original_stdout
        
        if os.path.exists(output_path):
            return {"success": True, "audio_path": output_path, "method": "optimized_pyttsx3"}
        else:
            return {"error": "pyttsx3音频文件生成失败"}
            
    except ImportError:
        return {"error": "pyttsx3未安装，请运行: pip install pyttsx3"}
    except Exception as e:
        return {"error": f"pyttsx3处理异常: {str(e)}"}

def text_to_speech_megatts3_fallback(text, output_path):
    """Fallback TTS that shells out to a local MegaTTS3 checkout, if present.

    Verifies the model directory and its required checkpoint/config files,
    synthesizes a quiet sine-wave reference clip when none exists, runs the
    MegaTTS3 CLI, then moves the generated WAV to *output_path*.
    Returns {"success": ...} or {"error": ...}.
    """
    try:
        megatts3_path = os.path.join(os.path.dirname(__file__), "MegaTTS3-main")
        if not os.path.exists(megatts3_path):
            return {"error": "MegaTTS3模型目录不存在"}

        # Every checkpoint/config the CLI needs must be present up front.
        required_files = (
            "diffusion_transformer/model_only_last.ckpt",
            "diffusion_transformer/config.yaml",
            "aligner_lm/model_only_last.ckpt",
            "aligner_lm/config.yaml",
            "duration_lm/model_only_last.ckpt",
            "duration_lm/config.yaml",
            "wavvae/decoder.ckpt",
            "wavvae/config.yaml",
            "g2p/model.safetensors",
            "g2p/config.json",
            "tts/infer_cli.py",
        )
        for file_path in required_files:
            if not os.path.exists(os.path.join(megatts3_path, file_path)):
                return {"error": f"MegaTTS3模型文件不存在: {file_path}"}

        output_dir = os.path.dirname(output_path)
        os.makedirs(output_dir, exist_ok=True)

        # MegaTTS3 needs a reference speaker clip; synthesize a quiet 440Hz
        # tone as a stand-in when none has been provided.
        reference_audio = os.path.join(megatts3_path, "reference.wav")
        if not os.path.exists(reference_audio):
            import numpy as np
            import soundfile as sf
            sample_rate = 24000
            duration = 3
            t = np.linspace(0, duration, int(sample_rate * duration))
            sf.write(reference_audio, np.sin(2 * np.pi * 440 * t) * 0.1, sample_rate)

        # Launch the MegaTTS3 inference CLI as a child process.
        infer_script = os.path.join(megatts3_path, "tts", "infer_cli.py")
        cmd = [
            'python',
            infer_script,
            '--input_wav', reference_audio,
            '--input_text', text,
            '--output_dir', output_dir,
            '--time_step', '32',
            '--p_w', '1.6',
            '--t_w', '2.5',
        ]
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=300, cwd=megatts3_path)

        if result.returncode != 0:
            return {"error": f"MegaTTS3转换失败: {result.stderr}"}

        # The CLI names its output after the input text's first 20 characters.
        expected_filename = f"[P]{text[:20]}.wav"
        generated_path = os.path.join(output_dir, expected_filename)
        if not os.path.exists(generated_path):
            return {"error": f"MegaTTS3生成的音频文件不存在: {expected_filename}"}
        import shutil
        shutil.move(generated_path, output_path)
        return {"success": True, "audio_path": output_path, "method": "megatts3"}

    except subprocess.TimeoutExpired:
        return {"error": "MegaTTS3转换超时"}
    except Exception as e:
        return {"error": f"MegaTTS3处理异常: {str(e)}"}

def create_dummy_audio(text, output_path):
    """Create a simple beep-tone WAV file as a last-resort fallback.

    Writes 3 seconds of a quiet 440 Hz sine wave (16 kHz, mono, 16-bit PCM)
    to ``output_path``.  The ``text`` argument is accepted only so this
    function matches the call signature of the real TTS backends tried in
    ``main()``; its content is not spoken.

    Bug fix: the previous hand-typed header encoded the byte-rate field as
    32128 (0x7D80) instead of the correct 16000 * 2 = 32000 (0x7D00).  The
    header is now built with ``struct.pack`` so every field is derived from
    the audio parameters instead of hard-coded bytes.

    Args:
        text: Ignored; present for interface compatibility.
        output_path: Destination path for the generated WAV file.

    Returns:
        dict: ``{"success": True, "audio_path": ..., "method": "dummy", ...}``
        on success, or ``{"error": ...}`` on failure (never raises).
    """
    try:
        import math
        import struct

        sample_rate = 16000
        channels = 1
        bits_per_sample = 16
        duration = 3  # seconds
        num_samples = sample_rate * duration

        # Quiet 440 Hz sine wave as 16-bit signed little-endian PCM.
        audio_data = b''.join(
            struct.pack(
                '<h',
                int(32767 * 0.1 * math.sin(2 * math.pi * 440 * i / sample_rate))
            )
            for i in range(num_samples)
        )

        block_align = channels * bits_per_sample // 8
        byte_rate = sample_rate * block_align  # 32000 for 16 kHz mono 16-bit
        data_size = len(audio_data)

        # Canonical 44-byte RIFF/WAVE header for uncompressed PCM.
        wav_header = struct.pack(
            '<4sI4s4sIHHIIHH4sI',
            b'RIFF',
            36 + data_size,      # RIFF chunk size = total file size - 8
            b'WAVE',
            b'fmt ',
            16,                  # fmt sub-chunk size for PCM
            1,                   # audio format tag: 1 = PCM
            channels,
            sample_rate,
            byte_rate,
            block_align,
            bits_per_sample,
            b'data',
            data_size,
        )

        with open(output_path, 'wb') as f:
            f.write(wav_header)
            f.write(audio_data)

        return {"success": True, "audio_path": output_path, "method": "dummy", "note": "生成了提示音频文件"}

    except Exception as e:
        return {"error": f"创建提示音频失败: {str(e)}"}

def main() -> None:
    """Script entry point: read text (or JSON conversation data) from argv
    and synthesize speech to the given output path.

    Usage: ``python text_to_speech_edge_offline.py <input_file_or_text> <output_path>``

    The first argument is either a path to a ``.json`` file containing a list
    of chat messages, or the raw text / JSON string itself.  A single JSON
    result object (``{"success": ...}`` or ``{"error": ...}``) is printed to
    stdout; all progress logging goes to stderr.
    """
    if len(sys.argv) < 3:
        print("Usage: python text_to_speech_edge_offline.py <input_file_or_text> <output_path>")
        print("错误: 参数不足")
        print(f"接收到的参数: {sys.argv}")
        exit(1)
    
    # Make sure the command-line arguments are decoded correctly.
    try:
        input_source = sys.argv[1]
        output_path = sys.argv[2]
        
        # On Windows, try to repair a possible console-encoding problem.
        if sys.platform.startswith('win'):
            try:
                # Try different decoding strategies.
                if isinstance(input_source, bytes):
                    input_source = input_source.decode('utf-8')
                elif isinstance(input_source, str):
                    # Already a str: attempt a latin1 -> utf-8 round trip to
                    # undo mojibake.  NOTE(review): this corrupts arguments
                    # that were decoded correctly in the first place; the
                    # except below then silently keeps the original value —
                    # verify behavior on a non-mojibake Windows console.
                    input_source = input_source.encode('latin1').decode('utf-8')
            except:
                pass  # If re-decoding fails, keep the original data.
    except Exception as e:
        print(f"参数解码失败: {e}", file=sys.stderr)
        input_source = sys.argv[1]
        output_path = sys.argv[2]
    
    print(f"=== Python TTS脚本开始 ===", file=sys.stderr)
    print(f"输入源: {input_source}", file=sys.stderr)
    print(f"输出路径: {output_path}", file=sys.stderr)
    
    # Decide whether the input source is a file path or literal text.
    if os.path.exists(input_source) and input_source.endswith('.json'):
        # Read the JSON payload from the file.
        print(f"从文件读取数据: {input_source}", file=sys.stderr)
        try:
            with open(input_source, 'r', encoding='utf-8') as f:
                input_data = f.read()
            print(f"文件数据长度: {len(input_data)}", file=sys.stderr)
        except Exception as e:
            print(f"读取文件失败: {e}", file=sys.stderr)
            print(json.dumps({"error": f"读取文件失败: {str(e)}"}, ensure_ascii=False))
            return
    else:
        # Use the command-line argument itself as the input data.
        input_data = input_source
        print(f"直接使用命令行参数，数据长度: {len(input_data)}", file=sys.stderr)
    
    # Guard against oversized JSON payloads.
    if len(input_data) > 5000000:  # 5 MB limit
        print(json.dumps({"error": "输入数据过大，请减少对话内容"}, ensure_ascii=False))
        return
    
    # Check whether the input is JSON-encoded conversation data.
    try:
        conversation_data = json.loads(input_data)
        if isinstance(conversation_data, list) and len(conversation_data) > 0:
            # Conversation data: use the multi-voice TTS path.
            print(f"检测到对话数据，条数: {len(conversation_data)}", file=sys.stderr)
            
            # Cap the number of messages to keep the audio manageable.
            if len(conversation_data) > 100:
                conversation_data = conversation_data[-100:]  # keep only the last 100 messages
                print(f"限制为最后100条对话", file=sys.stderr)
            
            # Count the total number of characters across all messages.
            total_chars = sum(len(msg.get('content', '')) for msg in conversation_data)
            print(f"总字符数: {total_chars}", file=sys.stderr)
            
            if total_chars > 50000:  # 50k-character limit
                print(f"字符数过多({total_chars})，将进行分段处理", file=sys.stderr)
                # Split into segments: at most 50 messages or 20k characters each.
                segments = []
                current_segment = []
                current_chars = 0
                
                for msg in conversation_data:
                    msg_chars = len(msg.get('content', ''))
                    if (len(current_segment) >= 50 or current_chars + msg_chars > 20000) and current_segment:
                        segments.append(current_segment)
                        current_segment = [msg]
                        current_chars = msg_chars
                    else:
                        current_segment.append(msg)
                        current_chars += msg_chars
                
                if current_segment:
                    segments.append(current_segment)
                
                print(f"分段处理完成，共{len(segments)}段", file=sys.stderr)
                
                # Process only the first segment (the remaining segments are
                # dropped — only one output file is produced).
                if segments:
                    print("处理第一段对话...", file=sys.stderr)
                    result = text_to_speech_edge_tts_multi_voice(segments[0], output_path)
                    if "success" in result:
                        print("第一段TTS转换成功", file=sys.stderr)
                        print(json.dumps(result, ensure_ascii=False))
                        return
                    else:
                        print(f"第一段TTS失败: {result.get('error', '未知错误')}", file=sys.stderr)
            else:
                print("开始多声音TTS转换...", file=sys.stderr)
                # Prefer XTTS v2 multi-voice first (fully offline).
                xtts_try = text_to_speech_xtts_multi_voice(conversation_data, output_path)
                if "success" in xtts_try:
                    print("XTTS多声音TTS转换成功", file=sys.stderr)
                    print(json.dumps(xtts_try, ensure_ascii=False))
                    return
                else:
                    print(f"XTTS多声音失败: {xtts_try.get('error', '未知错误')}", file=sys.stderr)

                # Fall back to Edge TTS multi-voice.
                result = text_to_speech_edge_tts_multi_voice(conversation_data, output_path)
                if "success" in result:
                    print("Edge多声音TTS转换成功", file=sys.stderr)
                    print(json.dumps(result, ensure_ascii=False))
                    return
                else:
                    print(f"Edge多声音TTS失败: {result.get('error', '未知错误')}", file=sys.stderr)
                    # Multi-voice failed: fall through to single-voice by
                    # flattening the conversation into one text blob.
                    text = ""
                    for msg in conversation_data:
                        role = "用户" if msg.get('role') == 'user' else "鼎盛"
                        content = msg.get('content', '')
                        if msg.get('reasoningContent'):
                            content = f"思考过程：{msg['reasoningContent']}\n回答：{content}"
                        text += f"{role}：{content}\n\n"
                    input_data = clean_text_for_tts(text.strip())
                    print("尝试单声音TTS...", file=sys.stderr)
        else:
            # Parsed JSON is not a non-empty list: treat it as plain text.
            print("JSON格式不正确，按普通文本处理", file=sys.stderr)
            input_data = str(conversation_data)
    except (json.JSONDecodeError, TypeError) as e:
        # Not JSON at all: keep input_data as plain text.
        print(f"JSON解析失败，按普通文本处理: {e}", file=sys.stderr)
        pass
    except Exception as e:
        print(f"数据处理异常: {str(e)}", file=sys.stderr)
        print(json.dumps({"error": f"数据处理异常: {str(e)}"}, ensure_ascii=False))
        return
    
    # Single-text mode: try each TTS backend in priority order until one succeeds.
    tts_methods = [
        ("Edge TTS", text_to_speech_edge_tts),           # Edge TTS (high-quality voice, preferred)
        ("XTTS v2", text_to_speech_xtts_single),         # offline XTTS v2 (backup)
        ("MNN Piper TTS", text_to_speech_piper_mnn),     # MNN Piper TTS (fully offline)
        ("Piper TTS", text_to_speech_piper_tts),         # Piper TTS (fully offline)
        ("优化pyttsx3", text_to_speech_optimized_pyttsx3), # tuned pyttsx3 (backup)
        ("MegaTTS3备用", text_to_speech_megatts3_fallback), # MegaTTS3 model (backup)
        ("dummy", create_dummy_audio)                     # beep-tone placeholder audio
    ]
    
    for method_name, method_func in tts_methods:
        try:
            print(f"尝试 {method_name}...", file=sys.stderr)
            result = method_func(input_data, output_path)
            
            if "success" in result:
                print(f"{method_name} 成功", file=sys.stderr)
                print(json.dumps(result, ensure_ascii=False))
                return
            
            # On failure, move on to the next method without emitting the error.
            print(f"{method_name} 失败", file=sys.stderr)
        except Exception as e:
            # Swallow the exception and try the next method.
            print(f"{method_name} 异常: {e}", file=sys.stderr)
            continue
    
    # Every backend failed: report a final error on stdout.
    print("所有TTS方法都失败了", file=sys.stderr)
    print(json.dumps({"error": "所有TTS方法都失败了"}, ensure_ascii=False))

if __name__ == "__main__":
    main()
