import asyncio
import concurrent.futures
import datetime
import threading
import time
import subprocess
import tempfile
import os
import re
import sys
import contextlib
import shutil
import multiprocessing  # 添加multiprocessing导入以支持Windows打包
from pathlib import Path
from typing import Any, Optional
from tkinter import END, Text, filedialog, messagebox, StringVar, BooleanVar, Canvas
from tkinter import ttk

from ttkbootstrap import Style

import edge_tts

try:
    import pygame
    HAS_PYGAME = True
except ImportError:
    HAS_PYGAME = False


# Estimated synthesis throughput in characters per second, read when computing
# the remaining-time estimate.  NOTE(review): the name suggests a rolling
# average, but only the initial value is visible in this chunk — confirm
# whether it is updated elsewhere.
AVERAGE_STATE: dict[str, float] = {"rate": 55.0}

# Catalog of selectable voice presets shown in the UI combobox.
# Each entry:
#   label        - display text shown to the user (also used as a cache key)
#   voice        - Azure neural voice identifier passed to edge-tts
#   style        - official SSML speaking style, used only when the installed
#                  edge-tts accepts a ``style`` kwarg (see ensure_style_capability)
#   style_degree - optional style intensity, used only when supported
#   rate/pitch/volume - prosody overrides always applied when present
#   fallback     - prosody controls that emulate the style when the official
#                  ``style`` kwarg is unsupported (see prepare_voice_kwargs)
VOICE_OPTIONS: list[dict[str, Any]] = [
    # Short-video / advertising voices
    {
        "label": "中文-自然女声（短视频解说）",
        "voice": "zh-CN-XiaoxiaoNeural",
        "style": "cheerful",
        "style_degree": 1.2,
        "fallback": {"rate": "+25%", "pitch": "+2Hz", "volume": "+4%"},
    },
    {
        "label": "自然男声（亲切科普型）",
        "voice": "zh-CN-YunxiNeural",
        "style": "narration-relaxed",
        "style_degree": 1.0,
        "rate": "+8%",
        "pitch": "+0Hz",
        "volume": "+0%"
    },
    {
        "label": "中文-活力男声（广告宣传）",
        "voice": "zh-CN-YunhaoNeural",
        "style": "advertisement-upbeat",
        "fallback": {"rate": "+10%", "pitch": "+3Hz", "volume": "+5%"},
    },
    {
        "label": "中文-甜美女声（直播带货）",
        "voice": "zh-CN-XiaochenNeural",
        "style": "livecommercial",
        "fallback": {"rate": "+15%", "pitch": "+2Hz", "volume": "+3%"},
    },
    {
        "label": "磁性男声（科普解说型）",
        "voice": "zh-CN-YunzeNeural",
        "style": "documentary-narration",
        "style_degree": 1.1,
        "rate": "+5%",
        "pitch": "-1Hz",
        "volume": "+6%"
    },
    {
        "label": "专业男声（高效解说型）",
        "voice": "zh-CN-YunyangNeural",
        "style": "narration-professional",
        "style_degree": 1.3,
        "rate": "+15%",
        "pitch": "+1Hz",
        "volume": "+3%"
    },
    # News broadcasting voices
    {
        "label": "中文-自然女声（专业播报）",
        "voice": "zh-CN-XiaoxiaoNeural",
        "style": "newscast-casual",
        "fallback": {"rate": "+5%", "pitch": "+0Hz"},
    },
    {
        "label": "中文-沉稳男声（新闻解说）",
        "voice": "zh-CN-YunjianNeural",
        "style": "newscast",
        "fallback": {"rate": "-5%", "pitch": "-1Hz"},
    },
    {
        "label": "中文-专业男声（新闻播报）",
        "voice": "zh-CN-YunyangNeural",
        "style": "newscast-casual",
        "fallback": {"rate": "+5%", "pitch": "+2Hz"},
    },
    {
        "label": "中文-沉稳男声（纪录片解说）",
        "voice": "zh-CN-YunjianNeural",
        "style": "documentary-narration",
        "fallback": {"rate": "+10%", "pitch": "+10Hz"},
    },
    {
        "label": "中文-磁性男声（纪录片）",
        "voice": "zh-CN-YunzeNeural",
        "style": "documentary-narration",
        "fallback": {"rate": "-8%", "pitch": "-2Hz"},
    },
    # Commentary / narration voices
    {
        "label": "中文-情感女声（故事讲述）",
        "voice": "zh-CN-XiaohanNeural",
        "style": "narration-relaxed",
        "fallback": {"rate": "-10%", "pitch": "-1Hz"},
    },
    {
        "label": "中文-温和男声（轻松叙述）",
        "voice": "zh-CN-YunxiNeural",
        "style": "narration-relaxed",
        "fallback": {"rate": "-8%", "pitch": "-1Hz"},
    },
    {
        "label": "中文-专业男声（专业叙述）",
        "voice": "zh-CN-YunyangNeural",
        "style": "narration-professional",
        "fallback": {"rate": "-5%", "pitch": "-1Hz"},
    },
    {
        "label": "中文-沉稳男声（体育解说）",
        "voice": "zh-CN-YunjianNeural",
        "style": "sports-commentary",
        "fallback": {"rate": "+10%", "pitch": "+1Hz"},
    },
    {
        "label": "中文-激昂男声（激情体育）",
        "voice": "zh-CN-YunjianNeural",
        "style": "sports-commentary-excited",
        "fallback": {"rate": "+20%", "pitch": "+3Hz", "volume": "+5%"},
    },
    # Emotional voices - positive
    {
        "label": "中文-活泼女声（开心愉快）",
        "voice": "zh-CN-XiaoxiaoNeural",
        "style": "cheerful",
        "fallback": {"rate": "+10%", "pitch": "+2Hz"},
    },
    {
        "label": "中文-温柔女声（亲切友好）",
        "voice": "zh-CN-XiaohanNeural",
        "style": "affectionate",
        "fallback": {"rate": "-5%", "pitch": "+1Hz"},
    },
    {
        "label": "中文-甜美女声（温柔细腻）",
        "voice": "zh-CN-XiaomoNeural",
        "style": "gentle",
        "fallback": {"rate": "-8%", "pitch": "+1Hz"},
    },
    {
        "label": "中文-平静女声（冷静沉稳）",
        "voice": "zh-CN-XiaohanNeural",
        "style": "calm",
        "fallback": {"rate": "-10%", "pitch": "-1Hz"},
    },
    {
        "label": "中文-兴奋女声（激动兴奋）",
        "voice": "zh-CN-XiaoxiaoNeural",
        "style": "excited",
        "fallback": {"rate": "+20%", "pitch": "+3Hz", "volume": "+5%"},
    },
    # Emotional voices - neutral / special-purpose
    {
        "label": "中文-自然女声（友好对话）",
        "voice": "zh-CN-XiaoxiaoNeural",
        "style": "friendly",
        "fallback": {"rate": "+5%", "pitch": "+1Hz"},
    },
    {
        "label": "中文-自然女声（轻松聊天）",
        "voice": "zh-CN-XiaoxiaoNeural",
        "style": "chat-casual",
        "fallback": {"rate": "+8%", "pitch": "+1Hz"},
    },
    {
        "label": "中文-温和男声（助手对话）",
        "voice": "zh-CN-YunxiNeural",
        "style": "assistant",
        "fallback": {"rate": "+0%", "pitch": "+0Hz"},
    },
    {
        "label": "中文-专业女声（客服服务）",
        "voice": "zh-CN-XiaoxiaoNeural",
        "style": "customerservice",
        "fallback": {"rate": "+5%", "pitch": "+0Hz"},
    },
    {
        "label": "中文-专业男声（客服服务）",
        "voice": "zh-CN-YunyangNeural",
        "style": "customerservice",
        "fallback": {"rate": "+3%", "pitch": "-1Hz"},
    },
    {
        "label": "中文-自然女声（诗歌朗诵）",
        "voice": "zh-CN-XiaoxiaoNeural",
        "style": "poetry-reading",
        "fallback": {"rate": "-15%", "pitch": "+1Hz"},
    },
    {
        "label": "中文-自然女声（轻声细语）",
        "voice": "zh-CN-XiaoxiaoNeural",
        "style": "whispering",
        "fallback": {"rate": "-5%", "pitch": "-2Hz", "volume": "-20%"},
    },
    {
        "label": "中文-自然女声（道歉语气）",
        "voice": "zh-CN-XiaoxiaoNeural",
        "style": "sorry",
        "fallback": {"rate": "-5%", "pitch": "-1Hz"},
    },
    # Emotional voices - negative (special scenarios)
    {
        "label": "中文-悲伤女声（悲伤难过）",
        "voice": "zh-CN-XiaohanNeural",
        "style": "sad",
        "fallback": {"rate": "-15%", "pitch": "-2Hz"},
    },
    {
        "label": "中文-严肃女声（严肃认真）",
        "voice": "zh-CN-XiaohanNeural",
        "style": "serious",
        "fallback": {"rate": "-5%", "pitch": "-1Hz"},
    },
    {
        "label": "中文-愤怒女声（愤怒不满）",
        "voice": "zh-CN-XiaohanNeural",
        "style": "angry",
        "fallback": {"rate": "+15%", "pitch": "+2Hz", "volume": "+5%"},
    },
    {
        "label": "中文-恐惧女声（恐惧害怕）",
        "voice": "zh-CN-XiaohanNeural",
        "style": "fearful",
        "fallback": {"rate": "+10%", "pitch": "+3Hz"},
    },
    {
        "label": "中文-尴尬女声（尴尬害羞）",
        "voice": "zh-CN-XiaomoNeural",
        "style": "embarrassed",
        "fallback": {"rate": "-5%", "pitch": "+1Hz"},
    },
]

# A visual split marker is inserted after every this many plain-text characters.
SPLIT_MARK_INTERVAL = 3000
# Marker rendered into the editor, e.g. 【3000】 at the 3000-character boundary.
SPLIT_MARK_TEMPLATE = "【{count}】"
# Matches one marker plus its optional surrounding newlines so it can be stripped.
SPLIT_MARK_PATTERN = re.compile(r"\n?【\d+】\n?")
# Sample sentence used for preview when no segment text is available.
PREVIEW_DEFAULT_TEXT = "欢迎使用试听功能，这是默认示例文本。"
# Cache of generated preview clips: {voice_label: (audio_path, text_hash)}.
PREVIEW_CACHE: dict[str, tuple[Path, str]] = {}
# True once a preview has been started through pygame (used to stop replays).
PREVIEW_PLAYING = False


def _make_split_marker(position: int) -> str:
    """Render the split-marker token for character *position* (e.g. 【3000】)."""
    rendered = SPLIT_MARK_TEMPLATE.format(count=position)
    return rendered


def _build_marked_text(plain_text: str) -> tuple[str, list[str], list[str]]:
    """Chunk *plain_text* at fixed intervals and interleave split markers.

    Returns the assembled marked text together with the chunk list and the
    marker list used to build it (markers sit between consecutive chunks).
    """
    if not plain_text:
        return "", [], []
    step = SPLIT_MARK_INTERVAL
    chunks = [plain_text[pos : pos + step] for pos in range(0, len(plain_text), step)]
    # One marker between each pair of chunks, labelled with the cumulative count.
    marks = [
        f"\n{_make_split_marker(mark_no * step)}\n"
        for mark_no in range(1, len(chunks))
    ]
    assembled: list[str] = []
    for position, piece in enumerate(chunks):
        assembled.append(piece)
        if position < len(marks):
            assembled.append(marks[position])
    return "".join(assembled), chunks, marks


def _plain_offset_to_marked_index(
    chunks: list[str], marks: list[str], offset: int
) -> int:
    total_plain = sum(len(chunk) for chunk in chunks)
    offset = max(0, min(offset, total_plain))
    marked_index = 0
    remaining = offset
    for idx, chunk in enumerate(chunks):
        chunk_len = len(chunk)
        if remaining <= chunk_len:
            marked_index += remaining
            return marked_index
        marked_index += chunk_len
        remaining -= chunk_len
        if idx < len(marks):
            marked_index += len(marks[idx])
    return marked_index


def remove_split_markers(text: str) -> str:
    """Strip every auto-inserted split marker (and flanking newlines) from *text*."""
    cleaned = SPLIT_MARK_PATTERN.sub("", text)
    return cleaned


def ensure_split_markers(text: str) -> str:
    """Normalize *text* so markers sit exactly at each interval boundary."""
    stripped = remove_split_markers(text)
    marked_text, _chunks, _marks = _build_marked_text(stripped)
    return marked_text


def play_audio_internal(path: Path) -> bool:
    """Attempt in-process playback of *path* via pygame; report success."""
    global PREVIEW_PLAYING
    if not HAS_PYGAME:
        return False
    try:
        if not pygame.mixer.get_init():
            pygame.mixer.init()
        # Interrupt any preview that is still running.
        if PREVIEW_PLAYING:
            pygame.mixer.music.stop()
        pygame.mixer.music.load(str(path))
        pygame.mixer.music.play()
    except Exception:  # noqa: BLE001
        return False
    PREVIEW_PLAYING = True
    return True


def open_audio_file(path: Path) -> None:
    """Play *path*, preferring the in-process player with an OS-level fallback.

    Raises:
        RuntimeError: if no playback mechanism succeeds.
    """
    if play_audio_internal(path):
        return
    # Fall back to whatever player the host platform offers.
    try:
        platform = sys.platform
        if platform.startswith("win"):
            os.startfile(str(path))  # type: ignore[attr-defined]
            return
        if platform == "darwin":
            subprocess.Popen(["open", str(path)], close_fds=True)
            return
        # Linux and friends: probe a list of common CLI players.
        for player in ("ffplay", "aplay", "paplay", "vlc", "mpv"):
            if not shutil.which(player):
                continue
            subprocess.Popen(
                [player, str(path)],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
                close_fds=True,
            )
            return
        raise RuntimeError("未找到可用的播放器，请检查系统关联。")
    except Exception as exc:  # noqa: BLE001
        raise RuntimeError(f"无法播放试听文件：{exc}") from exc


# Lazily-probed edge-tts feature flags: None = not yet probed, then True/False.
_STYLE_CAPABILITY: dict[str, Optional[bool]] = {"style": None, "style_degree": None}
# Ensures the "style unsupported, emulation enabled" notice is shown only once.
_STYLE_WARNING_SHOWN = False


def format_duration(seconds: float) -> str:
    """Format a second count as zero-padded ``MM:SS``, flooring negatives to 00:00."""
    whole = int(seconds)
    if whole < 0:
        whole = 0
    return "{:02}:{:02}".format(*divmod(whole, 60))


def apply_controls(target: dict[str, Any], source: dict[str, Any]) -> None:
    """Copy the truthy rate/volume/pitch entries from *source* into *target*.

    Falsy values (missing keys, empty strings) leave *target* untouched.
    """
    for key in ("rate", "volume", "pitch"):
        control = source.get(key)
        if not control:
            continue
        target[key] = control


def _try_create_communicator(text: str, kwargs: dict[str, Any]) -> Optional[edge_tts.Communicate]:
    """Instantiate a Communicate, returning None when *kwargs* are unsupported.

    A TypeError here means this edge-tts version rejects one of the kwargs
    (used as a capability probe by ensure_style_capability).
    """
    try:
        communicator = edge_tts.Communicate(text, **kwargs)
    except TypeError:
        return None
    return communicator


def ensure_style_capability(voice_option: dict[str, Any]) -> None:
    """Probe once whether the installed edge-tts accepts ``style`` kwargs.

    Results are cached in the module-level ``_STYLE_CAPABILITY`` dict
    (None = not yet probed).  Probe order matters: try the full kwargs
    first, retry without ``style_degree``, and only then mark both
    features unsupported.
    """
    style = voice_option.get("style")
    if style is None:
        # NOTE(review): a style-less option optimistically marks BOTH features
        # supported without probing, so a later styled option skips the probe
        # entirely — confirm this shortcut is intended.
        if _STYLE_CAPABILITY["style"] is None:
            _STYLE_CAPABILITY["style"] = True
            _STYLE_CAPABILITY["style_degree"] = True
        return

    # Already probed (True or False): nothing more to do.
    if _STYLE_CAPABILITY["style"] is not None:
        return

    voice = voice_option["voice"]
    base_kwargs: dict[str, Any] = {"voice": voice}
    apply_controls(base_kwargs, voice_option)
    base_kwargs["style"] = style
    style_degree = voice_option.get("style_degree")
    if style_degree is not None:
        base_kwargs["style_degree"] = style_degree

    # Attempt 1: full kwargs, including style_degree when present.
    communicator = _try_create_communicator("", base_kwargs)
    if communicator is not None:
        _STYLE_CAPABILITY["style"] = True
        _STYLE_CAPABILITY["style_degree"] = "style_degree" in base_kwargs
        return

    # Attempt 2: drop only style_degree and retry.
    if "style_degree" in base_kwargs:
        base_kwargs_without_degree = dict(base_kwargs)
        base_kwargs_without_degree.pop("style_degree", None)
        communicator = _try_create_communicator("", base_kwargs_without_degree)
        if communicator is not None:
            _STYLE_CAPABILITY["style"] = True
            _STYLE_CAPABILITY["style_degree"] = False
            return

    # Both attempts failed: this edge-tts build takes no style kwargs at all.
    _STYLE_CAPABILITY["style"] = False
    _STYLE_CAPABILITY["style_degree"] = False


def prepare_voice_kwargs(voice_option: dict[str, Any]) -> dict[str, Any]:
    """Build the Communicate kwargs for *voice_option*.

    Official style parameters are emitted only when the capability probe has
    marked them supported; otherwise the option's ``fallback`` prosody
    controls emulate the style.
    """
    kwargs: dict[str, Any] = {"voice": voice_option["voice"]}
    apply_controls(kwargs, voice_option)
    style = voice_option.get("style")
    if not style:
        return kwargs
    if _STYLE_CAPABILITY["style"]:
        kwargs["style"] = style
        if _STYLE_CAPABILITY["style_degree"] and "style_degree" in voice_option:
            kwargs["style_degree"] = voice_option["style_degree"]
    else:
        apply_controls(kwargs, voice_option.get("fallback", {}))
    return kwargs


def ensure_event_loop_policy() -> None:
    """On Windows, force the selector event loop to sidestep edge-tts asyncio issues."""
    policy_cls = getattr(asyncio, "WindowsSelectorEventLoopPolicy", None)
    if policy_cls is not None:
        asyncio.set_event_loop_policy(policy_cls())


def build_communicator(text: str, voice_option: dict[str, Any]) -> edge_tts.Communicate:
    """Create a Communicate for *text*, honoring style/rate settings when possible."""
    ensure_style_capability(voice_option)
    return edge_tts.Communicate(text, **prepare_voice_kwargs(voice_option))


def async_save_tts(text: str, voice_option: dict[str, Any], output_path: Path) -> None:
    """Synthesize *text* with *voice_option* and block until the file is saved."""

    async def _save() -> None:
        await build_communicator(text, voice_option).save(str(output_path))

    ensure_event_loop_policy()
    asyncio.run(_save())


def synthesize_to_file(text: str, voice_kwargs: dict[str, Any], output_path: Path) -> str:
    """Run one synthesis job (suitable for a worker process); return the output path."""
    ensure_event_loop_policy()

    async def _save() -> None:
        await edge_tts.Communicate(text, **voice_kwargs).save(str(output_path))

    asyncio.run(_save())
    return str(output_path)


def main() -> None:
    style = Style(theme="flatly")
    root = style.master
    root.title("文字转语音工具")
    root.geometry("960x640")
    try:
        root.state("zoomed")
    except Exception:  # noqa: BLE001
        root.attributes("-zoomed", True)

    # 语音风格选择
    voice_label = ttk.Label(root, text="语音风格：")
    voice_label.pack(anchor="w", padx=20, pady=(8, 0))

    voice_names = [option["label"] for option in VOICE_OPTIONS]
    voice_var = StringVar(value=voice_names[0])
    voice_combo = ttk.Combobox(root, textvariable=voice_var, values=voice_names, state="readonly")
    voice_combo.pack(fill="x", padx=20)

    merge_var = BooleanVar(value=False)
    merge_check = ttk.Checkbutton(root, text="合并选中段落生成单个音频", variable=merge_var)
    merge_check.pack(anchor="w", padx=20, pady=(8, 0))

    segments_frame = ttk.Frame(root)
    segments_frame.pack(fill="both", expand=True, padx=16, pady=(12, 8))

    segments_canvas = Canvas(segments_frame, borderwidth=0, highlightthickness=0)
    segments_scrollbar = ttk.Scrollbar(segments_frame, orient="vertical", command=segments_canvas.yview)
    segments_canvas.configure(yscrollcommand=segments_scrollbar.set)

    segments_scrollbar.pack(side="right", fill="y")
    segments_canvas.pack(side="left", fill="both", expand=True)

    segments_container = ttk.Frame(segments_canvas)
    container_window = segments_canvas.create_window((0, 0), window=segments_container, anchor="nw")

    def _update_scrollregion(_: object) -> None:
        segments_canvas.configure(scrollregion=segments_canvas.bbox("all"))

    def _resize_container(event: Any) -> None:
        segments_canvas.itemconfigure(container_window, width=event.width)

    segments_container.bind("<Configure>", _update_scrollregion)
    segments_canvas.bind("<Configure>", _resize_container)

    scroll_state: dict[str, Any] = {"target": 0.0, "job": None}

    def clamp(value: float, minimum: float, maximum: float) -> float:
        return max(minimum, min(maximum, value))

    def _animate_scroll() -> None:
        current = segments_canvas.yview()[0]
        target = scroll_state["target"]
        diff = target - current
        if abs(diff) < 1e-4:
            segments_canvas.yview_moveto(clamp(target, 0.0, 1.0))
            scroll_state["job"] = None
            return
        segments_canvas.yview_moveto(clamp(current + diff * 0.25, 0.0, 1.0))
        scroll_state["job"] = root.after(16, _animate_scroll)

    def _scroll_to(target: float) -> None:
        scroll_state["target"] = clamp(target, 0.0, 1.0)
        job = scroll_state.get("job")
        if job is None:
            scroll_state["job"] = root.after(0, _animate_scroll)

    def _on_mouse_wheel(event: Any) -> str:
        if getattr(event, "delta", 0):
            delta = event.delta / 120.0
        elif getattr(event, "num", None) in (4, 5):
            delta = -1.0 if event.num == 4 else 1.0
        else:
            return "break"

        current = segments_canvas.yview()[0]
        step = -delta * 0.08
        _scroll_to(current + step)
        return "break"

    segments_canvas.bind_all("<MouseWheel>", _on_mouse_wheel)
    segments_canvas.bind_all("<Button-4>", _on_mouse_wheel)
    segments_canvas.bind_all("<Button-5>", _on_mouse_wheel)

    def _unbind_scroll(_: object) -> None:
        segments_canvas.unbind_all("<MouseWheel>")
        segments_canvas.unbind_all("<Button-4>")
        segments_canvas.unbind_all("<Button-5>")

    def cleanup_on_exit() -> None:
        """程序退出时的清理工作"""
        _unbind_scroll(None)
        
        # 清理用户自定义的试听缓存（保留默认文本的缓存）
        default_prefix = PREVIEW_DEFAULT_TEXT[:20]
        for voice_label, (audio_path, text_hash) in list(PREVIEW_CACHE.items()):
            try:
                # 只删除非默认文本的缓存文件
                if text_hash != str(hash(default_prefix)) and audio_path.exists():
                    audio_path.unlink(missing_ok=True)
            except Exception:  # noqa: BLE001
                pass
        
        # 停止pygame音频播放
        if HAS_PYGAME:
            try:
                pygame.mixer.quit()
            except Exception:  # noqa: BLE001
                pass
        
        root.destroy()
    
    root.protocol("WM_DELETE_WINDOW", cleanup_on_exit)

    segments: list[dict[str, Any]] = []

    def refresh_markers(segment: dict[str, Any]) -> None:
        segment["marker_job"] = None
        if not segment.get("auto_marker"):
            return
        text_widget: Text = segment["text"]
        if not text_widget.winfo_exists():
            return

        insert_index = text_widget.index("insert")
        prefix_plain = remove_split_markers(text_widget.get("1.0", insert_index))
        plain_offset = len(prefix_plain)

        raw_content = text_widget.get("1.0", "end-1c")
        plain_content = remove_split_markers(raw_content)
        new_content, chunks, marks = _build_marked_text(plain_content)

        if new_content == raw_content:
            return

        view_info = text_widget.yview()
        segment["updating_markers"] = True
        try:
            text_widget.delete("1.0", END)
            text_widget.insert("1.0", new_content)
        finally:
            segment["updating_markers"] = False

        if view_info:
            text_widget.yview_moveto(view_info[0])
        marked_offset = _plain_offset_to_marked_index(chunks, marks, plain_offset)
        text_widget.mark_set("insert", f"1.0+{marked_offset}c")
        text_widget.see("insert")

        segment["clean_text"] = plain_content.strip()
        text_widget.edit_modified(False)
        update_segment_counter(segment)

    def schedule_marker_refresh(segment: dict[str, Any]) -> None:
        if not segment.get("auto_marker"):
            return
        job = segment.get("marker_job")
        if job is not None:
            root.after_cancel(job)
        segment["marker_job"] = root.after(150, lambda: refresh_markers(segment))

    def renumber_segments() -> None:
        for index, segment in enumerate(segments, start=1):
            segment["frame"].configure(text=f"段落 {index}")

    def extract_preview_text() -> str:
        if not segments:
            return ""
        first_segment = segments[0]
        text_widget: Text = first_segment["text"]
        raw_content = text_widget.get("1.0", "end-1c")
        plain_content = remove_split_markers(raw_content).strip()
        return plain_content[:20]

    def update_segment_counter(segment: dict[str, Any]) -> None:
        raw_content = segment["text"].get("1.0", "end-1c")
        plain_content = remove_split_markers(raw_content).strip()
        segment["clean_text"] = plain_content
        segment["counter_var"].set(f"字数：{len(plain_content)}")
        segment["text"].edit_modified(False)

    def create_segment(initial_text: str = "") -> None:
        if len(segments) >= 10:
            messagebox.showwarning("提示", "最多只能添加 10 段文本。")
            return

        frame = ttk.LabelFrame(segments_container, text="段落")
        frame.pack(fill="both", expand=True, pady=(0, 12))

        header_frame = ttk.Frame(frame)
        header_frame.pack(fill="x", padx=8, pady=(8, 4))

        include_var = BooleanVar(value=True)
        include_check = ttk.Checkbutton(header_frame, text="参与合成", variable=include_var)
        include_check.pack(side="left")

        text_widget = Text(
            frame,
            wrap="word",
            height=8,
            font=("Microsoft YaHei", 11),
            undo=True,
            autoseparators=True,
            maxundo=-1,
        )
        text_widget.pack(fill="both", expand=True, padx=8, pady=(0, 8))
        if initial_text:
            text_widget.insert("1.0", initial_text)

        resizer = ttk.Frame(frame, height=6, cursor="sb_v_double_arrow")
        resizer.pack(fill="x", padx=8, pady=(0, 8))

        info_frame = ttk.Frame(frame)
        info_frame.pack(fill="x", padx=8, pady=(0, 4))

        counter_var = StringVar(value="字数：0")
        counter_label = ttk.Label(info_frame, textvariable=counter_var)
        counter_label.pack(side="left")

        status_var = StringVar(value="进度：未开始")
        status_label = ttk.Label(info_frame, textvariable=status_var, foreground="#1a73e8")
        status_label.pack(side="left", padx=(12, 0))

        progress_bar = ttk.Progressbar(frame, mode="determinate", maximum=100)
        progress_bar.pack(fill="x", padx=8, pady=(0, 8))

        segment: dict[str, Any] = {
            "frame": frame,
            "text": text_widget,
            "include_var": include_var,
            "counter_var": counter_var,
            "status_var": status_var,
            "progress": progress_bar,
            "char_count": 0,
            "auto_marker": not segments,
            "marker_job": None,
            "updating_markers": False,
            "clean_text": "",
        }
        segments.append(segment)
        renumber_segments()

        def handle_modified(_: object) -> None:
            if segment.get("updating_markers"):
                text_widget.edit_modified(False)
                return
            schedule_marker_refresh(segment)
            update_segment_counter(segment)

        resize_state: dict[str, float] = {"start_y": 0.0, "start_height": float(text_widget.winfo_reqheight()), "line": 20.0}

        def _ensure_line_height() -> float:
            try:
                line_info = text_widget.dlineinfo("1.0")
                if line_info:
                    return float(line_info[4])
            except Exception:  # noqa: BLE001
                pass
            return float(text_widget.winfo_fpixels("1.0p") or 20.0)

        def handle_resize_start(event: Any) -> None:
            resize_state["start_y"] = event.y_root
            resize_state["start_height"] = float(text_widget.winfo_height())
            resize_state["line"] = _ensure_line_height()

        def handle_resize_drag(event: Any) -> None:
            delta = event.y_root - resize_state["start_y"]
            new_height_px = max(120.0, resize_state["start_height"] + delta)
            line_height = max(12.0, resize_state["line"])
            new_height_lines = int(max(4, min(40, new_height_px / line_height)))
            text_widget.configure(height=new_height_lines)

        resizer.bind("<ButtonPress-1>", handle_resize_start)
        resizer.bind("<B1-Motion>", handle_resize_drag)

        def handle_undo(event: object, widget: Text = text_widget) -> str:
            try:
                widget.edit_undo()
            except Exception:  # noqa: BLE001
                pass
            return "break"

        def handle_redo(event: object, widget: Text = text_widget) -> str:
            try:
                widget.edit_redo()
            except Exception:  # noqa: BLE001
                pass
            return "break"

        text_widget.bind("<<Modified>>", handle_modified)
        text_widget.bind("<Control-z>", handle_undo)
        text_widget.bind("<Control-y>", handle_redo)
        text_widget.bind("<Control-Z>", handle_undo)
        text_widget.bind("<Control-Y>", handle_redo)
        text_widget.edit_modified(False)
        update_segment_counter(segment)
        if segment["auto_marker"]:
            schedule_marker_refresh(segment)

        segments_canvas.update_idletasks()
        segments_canvas.yview_moveto(1.0)

    buttons_frame = ttk.Frame(root)
    buttons_frame.pack(fill="x", padx=16, pady=(0, 8))

    add_segment_button = ttk.Button(buttons_frame, text="新增文本段", command=create_segment)
    add_segment_button.pack(side="left")
    preview_button = ttk.Button(buttons_frame, text="试听")
    preview_button.pack(side="left", padx=(8, 0))

    # 合成按钮
    convert_button = ttk.Button(root, text="开始转换")
    convert_button.pack(pady=(4, 16))

    # 状态与计时
    status_var = StringVar(value="准备就绪")
    timer_var = StringVar(value="合成计时：00:00 | 预计剩余：--")

    bottom_frame = ttk.Frame(root)
    bottom_frame.pack(fill="x", side="bottom", padx=20, pady=(8, 16))

    status_label = ttk.Label(bottom_frame, textvariable=status_var, foreground="#1a73e8")
    status_label.pack(anchor="w")

    timer_label = ttk.Label(bottom_frame, textvariable=timer_var)
    timer_label.pack(anchor="w", pady=(4, 0))

    timer_state: dict[str, Any] = {"job": None, "start": 0.0, "estimate": 0.0, "chars": 0}

    def cancel_timer() -> None:
        job = timer_state.get("job")
        if job is not None:
            root.after_cancel(job)
        timer_state["job"] = None

    def start_timer(estimate: float, chars: int) -> None:
        cancel_timer()
        timer_state["start"] = time.perf_counter()
        timer_state["estimate"] = estimate
        timer_state["chars"] = chars

        def _tick() -> None:
            start = timer_state["start"]
            if start == 0.0:
                return
            elapsed = time.perf_counter() - start
            estimate_value = timer_state["estimate"]
            remaining = max(estimate_value - elapsed, 0.0)
            remaining_text = format_duration(remaining) if estimate_value > 0 else "--"
            timer_var.set(
                f"合成计时：{format_duration(elapsed)} | 预计剩余：{remaining_text}"
            )
            timer_state["job"] = root.after(500, _tick)

        _tick()

    def stop_timer(success: bool) -> None:
        elapsed = 0.0
        if timer_state["start"]:
            elapsed = time.perf_counter() - timer_state["start"]
        cancel_timer()
        timer_state["start"] = 0.0
        timer_state["estimate"] = 0.0
        timer_state["chars"] = 0
        if success:
            timer_var.set(f"合成计时：{format_duration(elapsed)} | 预计剩余：00:00")
        else:
            timer_var.set("合成计时：00:00 | 预计剩余：--")

    def locate_selected_voice() -> dict[str, str]:
        selected = voice_var.get()
        for option in VOICE_OPTIONS:
            if option["label"] == selected:
                return option
        return VOICE_OPTIONS[0]

    def handle_preview() -> None:
        if str(preview_button["state"]) == "disabled":
            return
        if str(convert_button["state"]) == "disabled":
            messagebox.showinfo("提示", "当前正在合成语音，请稍候再试听。")
            return

        # 获取试听文本
        preview_text = extract_preview_text().strip()
        if not preview_text:
            preview_text = PREVIEW_DEFAULT_TEXT
        preview_text = preview_text[:20] if preview_text else PREVIEW_DEFAULT_TEXT[:20]
        
        # 计算文本哈希值用于缓存判断
        text_hash = str(hash(preview_text))
        
        voice_option = locate_selected_voice()
        voice_label = voice_option["label"]
        
        # 检查缓存
        cached = PREVIEW_CACHE.get(voice_label)
        if cached:
            cached_path, cached_hash = cached
            if cached_hash == text_hash and cached_path.exists():
                # 直接播放缓存的音频
                open_audio_file(cached_path)
                return
        
        # 需要生成新的试听音频
        ensure_style_capability(voice_option)
        voice_kwargs = prepare_voice_kwargs(voice_option)

        previous_status = status_var.get()
        status_var.set("正在生成试听音频…")
        preview_button.config(state="disabled")

        def worker() -> None:
            kwargs = dict(voice_kwargs)
            # 为默认文本创建固定路径，用户文本创建临时路径
            is_default = preview_text == PREVIEW_DEFAULT_TEXT[:20]
            if is_default:
                # 默认文本放在固定位置
                temp_dir = Path(tempfile.gettempdir()) / "tts_preview_cache"
                temp_dir.mkdir(exist_ok=True)
                temp_path = temp_dir / f"default_{voice_label.replace(' ', '_')}.mp3"
            else:
                # 用户文本使用临时文件
                temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3", prefix="tts_preview_")
                temp_path = Path(temp_file.name)
                temp_file.close()

            try:
                async def _run() -> None:
                    communicator = edge_tts.Communicate(preview_text, **kwargs)
                    await communicator.save(str(temp_path))

                ensure_event_loop_policy()
                asyncio.run(_run())
                
                # 更新缓存
                PREVIEW_CACHE[voice_label] = (temp_path, text_hash)
                
                # 播放音频
                open_audio_file(temp_path)

            except Exception as exc:  # noqa: BLE001
                root.after(0, lambda err=exc: messagebox.showerror("错误", f"试听失败：{err}"))
            finally:
                def finalize() -> None:
                    status_var.set(previous_status)
                    preview_button.config(state="normal")

                root.after(0, finalize)

        threading.Thread(target=worker, daemon=True).start()

    preview_button.configure(command=handle_preview)

    def set_segment_status(segment: dict[str, Any], message: str, progress: Optional[float]) -> None:
        def _apply() -> None:
            segment["status_var"].set(f"进度：{message}")
            if progress is not None:
                segment["progress"]["value"] = progress

        root.after(0, _apply)

    def handle_conversion() -> None:
        """Synthesize every enabled, non-empty text segment to audio files.

        Workflow: collect segments -> resolve voice / warn about missing style
        support -> ask for an output path -> fan synthesis out to a process
        pool on a background thread -> optionally merge the per-segment files
        (pydub first, ffmpeg concat as fallback). All widget updates are
        marshalled back onto the Tk main loop via ``root.after``.
        """
        # Phase 1: gather the segments to synthesize and count characters
        # (the count feeds the duration estimate below).
        active_segments: list[dict[str, Any]] = []
        total_chars = 0
        for segment in segments:
            if not segment["include_var"].get():
                set_segment_status(segment, "已跳过", 0)
                continue
            raw_text = segment["text"].get("1.0", "end-1c")
            text_content = remove_split_markers(raw_text).strip()
            if not text_content:
                set_segment_status(segment, "空文本，已跳过", 0)
                continue
            segment["char_count"] = len(text_content)
            segment["clean_text"] = text_content
            total_chars += segment["char_count"]
            active_segments.append(segment)

        if not active_segments:
            messagebox.showwarning("提示", "请选择至少一个包含内容的文本段。")
            return

        # Phase 2: resolve the selected voice; warn once (per process) when
        # the installed edge-tts version cannot apply speech styles.
        voice_option = locate_selected_voice()
        global _STYLE_WARNING_SHOWN  # noqa: PLW0603
        ensure_style_capability(voice_option)
        style_capable = _STYLE_CAPABILITY["style"]
        if (
            voice_option.get("style")
            and style_capable is False
            and not _STYLE_WARNING_SHOWN
        ):
            _STYLE_WARNING_SHOWN = True
            status_var.set("当前版本不支持官方语音风格，已启用模拟效果。")

        voice_kwargs = prepare_voice_kwargs(voice_option)
        # AVERAGE_STATE["rate"] is a running chars-per-second estimate,
        # refined after each successful run (see finalize below).
        rate = AVERAGE_STATE["rate"]
        estimated_total = total_chars / rate if total_chars and rate > 0 else 0.0

        # Phase 3: ask where to save; abort silently if the user cancels.
        default_name = f"tts_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.mp3"
        file_path = filedialog.asksaveasfilename(
            defaultextension=".mp3",
            filetypes=[("音频文件", "*.mp3"), ("所有文件", "*.*")],
            initialfile=default_name,
            title="保存语音文件",
        )
        if not file_path:
            return
        target_path = Path(file_path)

        combine_enabled = merge_var.get()

        # Reset every progress bar; only queued (active) segments get the
        # "queued" label.
        for segment in segments:
            if segment in active_segments:
                set_segment_status(segment, "排队中", 0)
            segment["progress"]["value"] = 0

        convert_button.config(state="disabled")
        add_segment_button.config(state="disabled")
        status_var.set("语音合成中，请稍候…")
        start_timer(estimated_total, total_chars)

        def worker() -> None:
            """Background thread: run synthesis in a process pool, then hand
            the results to ``finalize`` on the Tk main loop."""
            success_entries: list[tuple[int, Path]] = []
            errors: list[str] = []

            # Maps each future -> (1-based segment index, segment dict, output path).
            futures: dict[concurrent.futures.Future[str], tuple[int, dict[str, Any], Path]] = {}

            max_workers = min(len(active_segments), 4)
            with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
                for index, segment in enumerate(active_segments, start=1):
                    text_content = segment.get("clean_text", "").strip()
                    segment_path = target_path
                    # Use "_partN" names whenever more than one file will exist,
                    # or when a merged file will later claim target_path itself.
                    multiple_segments = len(active_segments) > 1 or combine_enabled
                    if multiple_segments:
                        segment_path = target_path.with_name(
                            f"{target_path.stem}_part{index}{target_path.suffix}"
                        )

                    set_segment_status(segment, "合成中…", 20)
                    # dict(voice_kwargs): each subprocess gets its own copy.
                    future = executor.submit(
                        synthesize_to_file,
                        text_content,
                        dict(voice_kwargs),
                        segment_path,
                    )
                    futures[future] = (index, segment, segment_path)

                completed = 0
                total = len(active_segments)

                # Futures complete in arbitrary order; order is restored in
                # finalize by sorting on the segment index.
                for future in concurrent.futures.as_completed(futures):
                    index, segment, segment_path = futures[future]
                    try:
                        future.result()
                    except Exception as exc:  # noqa: BLE001
                        errors.append(f"第 {index} 段失败：{exc}")
                        set_segment_status(segment, "合成失败", 0)
                    else:
                        success_entries.append((index, segment_path))
                        set_segment_status(segment, "完成", 100)
                    finally:
                        completed += 1

                        def update_overall(progress_text: str) -> None:
                            status_var.set(progress_text)

                        # Default args bind the current counts eagerly,
                        # avoiding the late-binding closure pitfall.
                        root.after(
                            0,
                            lambda c=completed, t=total: update_overall(
                                f"正在合成：已完成 {c}/{t}"
                            ),
                        )

            def finalize() -> None:
                """Runs on the Tk main loop: re-enable controls, refresh the
                speed estimate, merge outputs if requested, report results."""
                add_segment_button.config(state="normal")
                convert_button.config(state="normal")

                elapsed = 0.0
                if timer_state["start"]:
                    elapsed = time.perf_counter() - timer_state["start"]

                # Restore the user's segment order (completion order differs).
                ordered_success = sorted(success_entries, key=lambda item: item[0])
                success_paths = [path for _, path in ordered_success]

                stop_timer(bool(success_paths))

                # Exponentially weighted update (0.7 old / 0.3 new) of the
                # chars-per-second estimate, clamped to [10, 120].
                if elapsed > 0 and total_chars > 0 and success_paths:
                    actual_rate = total_chars / elapsed
                    old_rate = AVERAGE_STATE["rate"]
                    AVERAGE_STATE["rate"] = max(
                        10.0,
                        min(120.0, old_rate * 0.7 + actual_rate * 0.3),
                    )

                if errors:
                    messagebox.showerror("错误", "\n".join(errors))

                merged_info = ""
                if combine_enabled and success_paths:
                    merged = False
                    pydub_error: Optional[str] = None
                    AudioSegment = None  # type: ignore[assignment]
                    # Merge attempt 1: pydub (imported lazily — it is an
                    # optional dependency that may be missing or broken).
                    try:
                        from pydub import AudioSegment as _AudioSegment  # type: ignore[import]

                        AudioSegment = _AudioSegment
                    except Exception as exc:  # noqa: BLE001
                        pydub_error = str(exc)
                    else:
                        try:
                            combined = None
                            for path in success_paths:
                                audio = AudioSegment.from_file(path)
                                combined = audio if combined is None else combined + audio
                            if combined is not None:
                                combined.export(
                                    target_path,
                                    format=target_path.suffix.removeprefix("."),
                                )
                                merged_info = f"\n已合并生成：{target_path}"
                                merged = True
                        except Exception as exc:  # noqa: BLE001
                            pydub_error = str(exc)

                    # Merge attempt 2: ffmpeg's concat demuxer with stream copy.
                    if not merged:
                        concat_path: Optional[Path] = None
                        try:
                            concat_file = tempfile.NamedTemporaryFile(
                                "w",
                                suffix=".txt",
                                delete=False,
                                encoding="utf-8",
                            )
                            concat_path = Path(concat_file.name)
                            for path in success_paths:
                                # Escape single quotes for the concat list
                                # format ("file '<path>'" entries).
                                safe_path = str(path.resolve()).replace("'", r"'\''")
                                concat_file.write(f"file '{safe_path}'\n")
                            concat_file.close()

                            result = subprocess.run(
                                [
                                    "ffmpeg",
                                    "-y",
                                    "-f",
                                    "concat",
                                    "-safe",
                                    "0",
                                    "-i",
                                    str(concat_path),
                                    "-c",
                                    "copy",
                                    str(target_path),
                                ],
                                capture_output=True,
                                text=True,
                            )
                            if result.returncode != 0:
                                raise RuntimeError(result.stderr.strip() or "ffmpeg 合并失败")  # noqa: TRY301

                            merged_info = f"\n已合并生成：{target_path}"
                            merged = True
                        except Exception as exc:  # noqa: BLE001
                            details = str(exc)
                            hint = (
                                "（pydub 不可用）"
                                if AudioSegment is None
                                else f"（pydub 错误：{pydub_error}）"
                                if pydub_error
                                else ""
                            )
                            messagebox.showwarning(
                                "提示",
                                "部分段落已合成，但合并失败："
                                f"{details}{hint}\n请确认 ffmpeg 已可用，或安装/修复 pydub 后重试。",
                            )
                        finally:
                            if concat_path is not None:
                                concat_path.unlink(missing_ok=True)
                    elif pydub_error:
                        # NOTE(review): this branch looks unreachable — merged
                        # is only True here via the pydub success path, where
                        # pydub_error is still None. Confirm before relying on it.
                        messagebox.showwarning(
                            "提示",
                            "pydub 在合并过程中出现问题，但已尝试继续合并。",
                        )

                if success_paths:
                    generated_files = "\n".join(path.name for path in success_paths)
                    status_var.set("合成完成。")
                    messagebox.showinfo(
                        "成功",
                        f"生成的音频：\n{generated_files}{merged_info}",
                    )
                else:
                    status_var.set("合成失败，请查看提示。")

            root.after(0, finalize)

        threading.Thread(target=worker, daemon=True).start()

    create_segment()
    convert_button.configure(command=handle_conversion)

    root.mainloop()


if __name__ == "__main__":
    # On Windows, a frozen (e.g. PyInstaller) executable must call
    # freeze_support() before any multiprocessing use; otherwise each
    # spawned worker process re-launches the exe window.
    multiprocessing.freeze_support()
    main()

