import argparse
import asyncio
import json
import os
import re
import shutil
import subprocess
import sys
import tempfile
import urllib.request
import urllib.error
from pathlib import Path
import ast
import time

# Optional-dependency shims: every third-party import below degrades to a
# no-op or minimal stand-in so the script still runs in a bare environment.

try:
    from dotenv import load_dotenv
except Exception:
    # python-dotenv missing: .env loading becomes a no-op returning False.
    def load_dotenv(*args, **kwargs):
        return False

try:
    import yaml
except Exception:
    # PyYAML missing: config-file parsing is skipped by callers (yaml is None).
    yaml = None

try:
    from loguru import logger as _loguru_logger
    LOGGER = _loguru_logger
except Exception:
    # loguru missing: print-based logger with the same info/warning/error API.
    class _SimpleLogger:
        def info(self, msg):
            print(msg)
        def warning(self, msg):
            print(msg)
        def error(self, msg):
            print(msg, file=sys.stderr)
    LOGGER = _SimpleLogger()

try:
    import edge_tts
except Exception:
    # edge-tts missing: _tts_save raises a RuntimeError when synthesis is requested.
    edge_tts = None

try:
    from mutagen.mp3 import MP3
except Exception:
    # mutagen missing: get_audio_duration falls back to ffprobe/ffmpeg parsing.
    MP3 = None
try:
    from imageio_ffmpeg import get_ffmpeg_exe
except Exception:
    # imageio-ffmpeg missing: assume an `ffmpeg` binary is on PATH.
    def get_ffmpeg_exe():
        return "ffmpeg"

def parse_resolution(s):
    """Parse a resolution string such as ``1280x720`` or ``1280,720``.

    Accepts an upper- or lower-case ``x`` separator and tolerates
    surrounding whitespace (the previous version rejected ``1280X720``
    because only the ``x``-branch lowercased the string).

    Returns:
        (width, height) as ints.
    Raises:
        ValueError: when neither separator is present or a part is not numeric.
    """
    s = s.strip().lower()
    if "x" in s:
        w, h = s.split("x", 1)
    elif "," in s:
        w, h = s.split(",", 1)
    else:
        raise ValueError("invalid resolution format")
    return int(w.strip()), int(h.strip())


def load_scenes(json_path: Path):
    """Read a scenes JSON document and return ``(data, scenes)``.

    ``scenes`` defaults to an empty list when the document has no
    ``scenes`` key.
    """
    with open(json_path, "r", encoding="utf-8") as fh:
        document = json.load(fh)
    return document, document.get("scenes", [])


def concat_narration(scenes):
    """Join the non-empty, stripped narration of every scene with newlines."""
    stripped = ((scene.get("narration") or "").strip() for scene in scenes)
    return "\n".join(part for part in stripped if part).strip()


def _strip_code_blocks(md: str) -> str:
    # remove fenced code blocks
    return re.sub(r"```[\s\S]*?```", "", md, flags=re.MULTILINE)


def parse_markdown_to_scenes(md_text: str, scene_level: int = 2):
    """Split a Markdown document into scene dicts, one per heading.

    Headings at or above *scene_level* (i.e. with that many ``#`` or fewer)
    start a new scene; everything until the next such heading becomes the
    scene's narration. Fenced code blocks are dropped first. The first H1
    becomes the document title in ``meta``.

    Returns:
        (document, scenes) where document is {"version", "meta", "scenes"}.
    """
    md = _strip_code_blocks(md_text)
    lines = md.splitlines()
    scenes = []
    cur_title = None
    cur_buf = []
    first_h1 = None
    def flush_scene():
        # Emit the buffered scene (if it has a title or any text) and reset.
        nonlocal cur_title, cur_buf
        text = " ".join([t.strip() for t in cur_buf if t.strip()]).strip()
        if (cur_title or text):
            # A heading with no body narrates its own title.
            if not text and cur_title:
                text = cur_title
            scene = {
                "id": f"scene-{len(scenes)+1:03d}",
                "title": (cur_title or "").strip(),
                "narration": text,
            }
            scenes.append(scene)
        cur_title = None
        cur_buf = []
    for raw in lines:
        m = re.match(r"^(#{1,6})\s+(.*)$", raw)
        if m:
            level = len(m.group(1))
            title = m.group(2).strip()
            # Remember the first top-level heading as the document title.
            if level == 1 and not first_h1:
                first_h1 = title
            if level <= max(1, int(scene_level)):
                flush_scene()
                cur_title = title
                continue
            # NOTE: headings deeper than scene_level fall through and are
            # buffered as plain text (hash marks included).
        # list items -> keep as text
        if raw.strip().startswith(('-', '*', '+')):
            cur_buf.append(raw.strip())
        else:
            cur_buf.append(raw)
    flush_scene()
    meta = {
        "title": first_h1 or "Generated Video",
        "language": "zh-CN",
    }
    return {"version": "1.0", "meta": meta, "scenes": scenes}, scenes


def normalize_rate(r: str) -> str:
    """Coerce a speech-rate value into edge-tts form (e.g. ``+10%``).

    Values already ending in ``%`` pass through unchanged; bare numbers
    gain a leading sign and a ``%`` suffix; empty or unparseable input
    falls back to ``+0%``.
    """
    r = (r or "").strip()
    if not r:
        return "+0%"
    if r.endswith("%"):
        return r
    try:
        float(r)
    except Exception:
        return "+0%"
    signed = r if r.startswith(("+", "-")) else "+" + r
    return signed + "%"


def normalize_pitch(p: str) -> str:
    """Coerce a pitch value into a form edge-tts accepts.

    Values ending in ``Hz`` or ``%`` pass through. A semitone value such
    as ``2st`` becomes the equivalent percent offset via
    ``(2**(n/12) - 1) * 100``. Bare numbers become signed percents.
    Anything else falls back to ``+0Hz``.
    """
    import math

    p = (p or "").strip()
    if not p:
        return "+0Hz"
    if p.endswith(("Hz", "%")):
        return p
    if p.endswith("st"):
        try:
            semitones = float(p[:-2])
        except Exception:
            return "+0Hz"
        percent = (math.pow(2.0, abs(semitones) / 12.0) - 1.0) * 100.0
        prefix = "+" if semitones >= 0 else "-"
        return f"{prefix}{percent:.1f}%"
    try:
        float(p)
    except Exception:
        return "+0Hz"
    return (p if p.startswith(("+", "-")) else "+" + p) + "%"


async def _tts_save(text, voice, rate, pitch, audio_path: Path):
    """Synthesize *text* with edge-tts and save the audio to *audio_path*.

    Raises RuntimeError when the optional edge-tts dependency is absent.
    Rate and pitch are normalized to edge-tts syntax first.
    """
    if edge_tts is None:
        raise RuntimeError("edge-tts is not installed. Please `pip install edge-tts`." )
    communicator = edge_tts.Communicate(
        text=text,
        voice=voice,
        rate=normalize_rate(rate),
        pitch=normalize_pitch(pitch),
    )
    await communicator.save(str(audio_path))


def synthesize_tts(text, voice, rate, pitch, audio_path: Path):
    """Run edge-tts synthesis with up to 3 attempts and exponential backoff.

    Fix: the previous version slept the backoff (up to 4 s) even after the
    final failed attempt; the sleep is now skipped before re-raising.

    Raises:
        The last exception when every attempt fails.
    """
    attempts = 3
    last_err = None
    for i in range(attempts):
        try:
            asyncio.run(_tts_save(text, voice, rate, pitch, audio_path))
            return
        except Exception as e:
            last_err = e
            if i < attempts - 1:
                time.sleep(1.0 * (2 ** i))
    raise last_err


def format_srt_timestamp(t):
    """Format seconds *t* as an SRT timestamp ``HH:MM:SS,mmm`` (truncating)."""
    whole = int(t)
    ms = int((t - whole) * 1000)
    minutes, seconds = divmod(whole, 60)
    hours, minutes = divmod(minutes, 60)
    return f"{hours:02d}:{minutes:02d}:{seconds:02d},{ms:03d}"


def split_sentences(text):
    """Split on CJK/Latin sentence-ending punctuation, keeping the ender."""
    pieces = re.split(r"(?<=[。！？.!?])\s*", text)
    return list(filter(str.strip, pieces))


def write_srt_by_proportion(text, total_duration, srt_path: Path):
    """Write an SRT file whose cue durations are proportional to sentence length.

    Each sentence receives a share of *total_duration* weighted by its
    whitespace-stripped character count (minimum weight 1); cue ends are
    clamped to *total_duration*.
    """
    sentences = split_sentences(text) or [text]
    weights = [max(1, len(re.sub(r"\s+", "", sent))) for sent in sentences]
    weight_sum = sum(weights)
    with open(srt_path, "w", encoding="utf-8") as out:
        start = 0.0
        for number, (sent, weight) in enumerate(zip(sentences, weights), 1):
            end = min(total_duration, start + total_duration * (weight / weight_sum))
            out.write(
                f"{number}\n"
                f"{format_srt_timestamp(start)} --> {format_srt_timestamp(end)}\n"
                f"{sent.strip()}\n\n"
            )
            start = end


def _parse_duration_from_ffmpeg_log(log: str) -> float | None:
    # Example: Duration: 00:00:07.12, start: 0.000000, bitrate: 128 kb/s
    m = re.search(r"Duration:\s*(\d+):(\d+):(\d+)\.(\d+)", log)
    if not m:
        return None
    h, mi, se, frac = m.groups()
    total = int(h) * 3600 + int(mi) * 60 + int(se)
    try:
        total += float("0." + frac)
    except Exception:
        pass
    return float(total)


def get_audio_duration(audio_path: Path) -> float | None:
    """Best-effort audio duration in seconds, or None when every probe fails.

    Tries three strategies in order of reliability:
    1. mutagen's MP3 metadata (only when mutagen imported successfully),
    2. an ffprobe binary (PATH, or guessed next to the bundled ffmpeg),
    3. parsing the ``Duration:`` line from ``ffmpeg -i`` console output.
    """
    # 1) mutagen (if available)
    if MP3 is not None:
        try:
            return float(MP3(str(audio_path)).info.length)
        except Exception:
            pass
    # 2) ffprobe if available
    ffprobe = shutil.which("ffprobe")
    if not ffprobe:
        # Guess an ffprobe sitting next to the (possibly bundled) ffmpeg binary.
        # NOTE(review): str.replace substitutes every occurrence of the
        # basename, not just the final path component — assumed harmless here.
        ff = get_ffmpeg_exe()
        base = os.path.basename(ff)
        cand = ff.replace(base, "ffprobe") if base else "ffprobe"
        if os.path.exists(cand):
            ffprobe = cand
    if ffprobe:
        try:
            out = subprocess.check_output([ffprobe, "-v", "error", "-show_entries", "format=duration", "-of", "default=nw=1:nk=1", str(audio_path)], stderr=subprocess.STDOUT)
            return float(out.decode().strip())
        except Exception:
            pass
    # 3) parse ffmpeg -i output
    try:
        ff = get_ffmpeg_exe()
        # ffmpeg -i with no output file exits non-zero but still prints metadata.
        p = subprocess.run([ff, "-i", str(audio_path)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        log = ((p.stderr or b"") + (p.stdout or b"")).decode(errors="ignore")
        dur = _parse_duration_from_ffmpeg_log(log)
        if dur:
            return dur
    except Exception:
        pass
    return None


def _llm_prompt_for_scenes(markdown_text: str) -> str:
    """Build the scene-generation prompt for the LLM.

    Prefers the project's PromptManager (few-shot, scene_level=2); when that
    package is unavailable, falls back to a simple inline JSON-schema prompt.
    """
    try:
        from prompts.templates import PromptManager
        manager = PromptManager()
        messages = manager.get_scene_generation_prompt(
            markdown_text,
            scene_level=2,
            use_few_shot=True
        )
        # Flatten the chat messages into a single "ROLE: content" prompt string.
        return "\n\n".join([f"{m['role'].upper()}: {m['content']}" for m in messages])
    except ImportError:
        # Prompt manager unavailable: fall back to a simple inline prompt.
        return (
            "你是一名教学视频的脚本结构化助手。\n"
            "请将下面的 Markdown 文档转换为用于生成视频的场景 JSON，严格遵循以下结构：\n"
            "{\n"
            "  \"version\": \"1.0\",\n"
            "  \"meta\": {\"title\": \"...\", \"language\": \"zh-CN\"},\n"
            "  \"scenes\": [\n"
            "    {\n"
            "      \"id\": \"scene-001\",\n"
            "      \"title\": \"...\",\n"
            "      \"narration\": \"分镜讲述文本，尽量口语化\",\n"
            "      \"duration_hint_sec\": 8.0,\n"
            "      \"visuals\": [ { \"type\": \"text\", \"content\": \"...\" } ]\n"
            "    }\n"
            "  ]\n"
            "}\n"
            "输出要求：只输出 JSON（不需要解释、不要代码块围栏）。\n\n"
            "Markdown 文档：\n" + markdown_text
        )


def _try_load_json_loose(text: str) -> dict | None:
    t = text.strip()
    # strip code fences if present
    t = re.sub(r"^```(?:json|javascript|python)?\n|\n```$", "", t)
    try:
        return json.loads(t)
    except Exception:
        # best effort: find the first {...} block
        m = re.search(r"\{[\s\S]*\}", t)
        if m:
            try:
                return json.loads(m.group(0))
            except Exception:
                return None
    return None


def _validate_scenes_obj(obj: dict) -> bool:
    if not isinstance(obj, dict):
        return False
    scenes = obj.get("scenes")
    if not isinstance(scenes, list) or not scenes:
        return False
    ok = False
    for s in scenes:
        if not isinstance(s, dict):
            continue
        title = (s.get("title") or "").strip()
        narration = (s.get("narration") or "").strip()
        if title or narration:
            ok = True
            break
    return ok


def _llm_generate_scenes(provider: str, model: str, api_base: str | None, api_key: str | None, markdown_text: str) -> dict | None:
    """Ask an LLM to convert markdown into a validated scenes document.

    Supported providers: "deepseek" (has a default base URL) and
    "minimax" / "gemini" (require an explicit OpenAI-compatible *api_base*).
    Returns the parsed scenes dict, or None on any failure (missing key,
    unsupported provider, HTTP error, unparseable/invalid response).

    Fix: the two provider branches previously duplicated the whole
    request/parse flow; they now share one path, and the prompt is only
    built for supported providers.
    """
    if not provider or provider == "none" or not api_key:
        return None
    provider = provider.lower()
    if provider == "deepseek":
        base = api_base.strip() if api_base else "https://api.deepseek.com"
        default_model = "deepseek-chat"
    elif provider in {"minimax", "gemini"}:
        # These adapters expect a user-supplied OpenAI-compatible endpoint.
        if not api_base:
            return None
        base = api_base
        default_model = "gpt-4o" if provider == "gemini" else "minimax-m2"
    else:
        return None
    url = base.rstrip("/") + "/v1/chat/completions"
    payload = {
        "model": model or default_model,
        "messages": [
            {"role": "system", "content": "你只需输出 JSON，不要任何解释。"},
            {"role": "user", "content": _llm_prompt_for_scenes(markdown_text)},
        ],
        "temperature": 0.2,
    }
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
    }
    data = _http_post_json(url, payload, headers)
    if not data:
        return None
    try:
        content = data["choices"][0]["message"]["content"]
    except Exception:
        return None
    obj = _try_load_json_loose(content or "")
    if obj and _validate_scenes_obj(obj):
        return obj
    return None

def try_render_manim(scenes, resolution_str, fps, build_dir: Path, out_video: Path):
    """Render the scene narrations as a Manim text animation (best-effort).

    Writes a generated Manim script into *build_dir*, invokes the `manim`
    CLI, then copies the newest matching MP4 to *out_video*.

    Returns:
        True when a video was produced and copied; False when the CLI is
        missing, the render fails, or no output file is found (callers fall
        back to the text/blank renderers).
    """
    script = build_dir / "manim_script.py"
    scene_texts = [ (s.get("narration") or s.get("title") or "").strip() for s in scenes ]
    # One Scene class that Write()s each narration text in sequence.
    content = "\n".join([
        "from manim import *",
        "",
        "class Generated(Scene):",
        "    def construct(self):",
        "        scene_texts = [",
        *[f"            {repr(t)}," for t in scene_texts if t],
        "        ]",
        "        for txt in scene_texts:",
        "            m = Text(txt, font_size=42)",
        "            self.play(Write(m))",
        "            self.wait(1)",
        "            self.play(FadeOut(m))",
        "",
    ])
    script.parent.mkdir(parents=True, exist_ok=True)
    script.write_text(content, encoding="utf-8")
    # NOTE(review): assumes the installed Manim accepts `-r W,H` — confirm
    # against the target Manim Community version.
    cmd = [
        "manim",
        "-r",
        resolution_str.replace("x", ","),
        "--fps",
        str(fps),
        "-o",
        "video_no_audio",
        script.name,
        "Generated",
    ]
    try:
        subprocess.run(cmd, cwd=build_dir, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except FileNotFoundError:
        LOGGER.warning("Manim CLI not found in PATH. Skipping Manim render and falling back to text frames.")
        return False
    except subprocess.CalledProcessError as e:
        try:
            err = e.stderr.decode(errors="ignore")
        except Exception:
            err = str(e)
        LOGGER.warning("Manim render failed. stderr tail:\n" + err[-2000:])
        return False
    # Manim's output directory layout varies; search for the named file first,
    # then fall back to the most recently modified MP4 under build_dir.
    mp4 = None
    for p in build_dir.rglob("*.mp4"):
        if p.name.startswith("video_no_audio"):
            mp4 = p
            break
    if not mp4:
        cands = sorted(build_dir.rglob("*.mp4"), key=lambda p: p.stat().st_mtime, reverse=True)
        if cands:
            mp4 = cands[0]
    if not mp4:
        return False
    out_video.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy2(mp4, out_video)
    return True


def render_fallback_blank(duration, resolution, fps, out_video: Path):
    """Render a solid black clip of *duration* seconds (last-resort video)."""
    width, height = resolution
    out_video.parent.mkdir(parents=True, exist_ok=True)
    args = [
        get_ffmpeg_exe(),
        "-y",
        "-f", "lavfi",
        "-t", f"{duration:.3f}",
        "-i", f"color=c=black:size={width}x{height}:rate={fps}",
        "-pix_fmt", "yuv420p",
        "-c:v", "libx264",
        str(out_video),
    ]
    subprocess.run(args, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)


def mux_audio(video_path: Path, audio_path: Path, output_path: Path, fps, srt_path: Path | None = None, embed_subs: bool = False, burn_subs: bool = False):
    """Mux *audio_path* into *video_path*, optionally with subtitles.

    Three modes (burn takes precedence over embed, both require an existing
    *srt_path*):
    - burn_subs: hard-burn subtitles via the `subtitles` filter (re-encodes video);
    - embed_subs: add a mov_text subtitle track (video stream copied);
    - neither: plain audio mux with the video stream copied.
    `-shortest` trims the output to the shorter of video/audio.
    Note: *fps* is currently unused here.
    """
    ff = get_ffmpeg_exe()
    output_path.parent.mkdir(parents=True, exist_ok=True)
    if burn_subs and srt_path and srt_path.exists():
        # hard-burn subtitles
        cmd = [
            ff, "-y",
            "-i", str(video_path),
            "-i", str(audio_path),
            "-vf", f"subtitles={str(srt_path.resolve())}",
            "-pix_fmt", "yuv420p",
            "-c:v", "libx264",
            "-c:a", "aac",
            "-shortest",
            str(output_path),
        ]
    elif embed_subs and srt_path and srt_path.exists():
        # Soft subtitles: map video, audio, and the SRT as a mov_text track.
        cmd = [
            ff, "-y",
            "-i", str(video_path),
            "-i", str(audio_path),
            "-i", str(srt_path),
            "-map", "0:v:0",
            "-map", "1:a:0",
            "-map", "2:0",
            "-c:v", "copy",
            "-c:a", "aac",
            "-c:s", "mov_text",
            "-metadata:s:s:0", "language=chi",
            "-shortest",
            str(output_path),
        ]
    else:
        # Plain mux: copy the video stream, encode audio to AAC.
        cmd = [
            ff,
            "-y",
            "-i", str(video_path),
            "-i", str(audio_path),
            "-c:v", "copy",
            "-c:a", "aac",
            "-shortest",
            str(output_path),
        ]
    subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)


def _find_font_path() -> str | None:
    candidates = [
        "/System/Library/Fonts/PingFang.ttc",
        "/System/Library/Fonts/STHeiti Light.ttc",
        "/Library/Fonts/Arial Unicode.ttf",
        "/Library/Fonts/Arial Unicode MS.ttf",
    ]
    for p in candidates:
        if os.path.exists(p):
            return p
    return None


def render_fallback_text(scenes, total_duration: float, resolution, fps: int, out_video: Path, theme: str = "dark"):
    """Render a simple text-slide video for the scenes (no Manim needed).

    Per-scene durations come from ``duration_hint_sec`` when present,
    otherwise from narration length, scaled to *total_duration*. Rendering
    strategy, in order of preference:
    1. ffmpeg ``drawtext`` (needs a known font file),
    2. Pillow-rendered PNG stills looped into clips,
    3. a solid-color blank video when there is no text or no Pillow.
    Scene clips are concatenated into *out_video* with the concat filter.
    """
    ff = get_ffmpeg_exe()
    w, h = resolution
    build = out_video.parent / "fallback_text"
    build.mkdir(parents=True, exist_ok=True)

    # decide per-scene durations by hint or text length proportion
    texts = []
    weights = []
    for s in scenes:
        t = (s.get("narration") or s.get("title") or "").strip()
        if not t:
            continue
        texts.append(t)
        hint = s.get("duration_hint_sec")
        if isinstance(hint, (int, float)) and hint > 0:
            weights.append(float(hint))
        else:
            weights.append(max(1.0, len(t)))
    if not texts:
        return render_fallback_blank(total_duration, resolution, fps, out_video)
    total_w = sum(weights)
    # Every scene gets at least 1 second regardless of its weight share.
    segs = [max(1.0, total_duration * (wgt / total_w)) for wgt in weights]

    # Prefer ffmpeg drawtext (no Pillow dependency)
    font_path = _find_font_path()
    if theme not in {"dark", "light"}:
        theme = "dark"
    bg_color = (16, 16, 16) if theme == "dark" else (240, 240, 240)
    fg_color = (240, 240, 240) if theme == "dark" else (16, 16, 16)
    scene_files = []
    if font_path:
        try:
            for idx, (text, seg_dur) in enumerate(zip(texts, segs), 1):
                # drawtext reads the text from a file to avoid shell-escaping issues.
                txtfile = build / f"scene_{idx:03d}.txt"
                txtfile.write_text(text, encoding="utf-8")
                clip_path = build / f"scene_{idx:03d}.mp4"
                draw = (
                    f"drawtext=fontfile={font_path}:textfile={txtfile}:fontsize={max(24, int(h*0.06))}:"
                    f"fontcolor={'white' if theme=='dark' else 'black'}:x=(w-text_w)/2:y=(h-text_h)/2:line_spacing={int(h*0.03)}"
                )
                cmd = [
                    ff,
                    "-y",
                    "-f", "lavfi",
                    "-t", f"{seg_dur:.3f}",
                    "-i", f"color=c={'black' if theme=='dark' else 'white'}:size={w}x{h}:rate={fps}",
                    "-vf", draw,
                    "-pix_fmt", "yuv420p",
                    "-c:v", "libx264",
                    str(clip_path),
                ]
                subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                scene_files.append(clip_path)
        except Exception as e:
            # Any drawtext failure discards partial clips and retries with Pillow.
            LOGGER.warning(f"ffmpeg drawtext fallback failed: {e}. Trying Pillow fallback.")
            scene_files = []

    if not scene_files:
        try:
            from PIL import Image, ImageDraw, ImageFont
        except Exception:
            LOGGER.info("Pillow not available; falling back to solid color video.")
            return render_fallback_blank(total_duration, resolution, fps, out_video)

        try:
            font = ImageFont.truetype(font_path, size=max(24, int(h * 0.06))) if font_path else ImageFont.load_default()
        except Exception:
            font = ImageFont.load_default()

        for idx, (text, seg_dur) in enumerate(zip(texts, segs), 1):
            img = Image.new("RGB", (w, h), bg_color)
            draw = ImageDraw.Draw(img)
            # Naive character-count wrap; assumes roughly fixed glyph width.
            max_chars = max(8, int(w / (font.size * 1.2)))
            lines, cur = [], ""
            for ch in text:
                if ch == "\n" or len(cur) >= max_chars:
                    if cur:
                        lines.append(cur)
                    cur = "" if ch == "\n" else ch
                else:
                    cur += ch
            if cur:
                lines.append(cur)
            # Center the wrapped text block vertically and each line horizontally.
            line_h = int(font.size * 1.4)
            block_h = line_h * len(lines)
            y = (h - block_h) // 2
            for i, line in enumerate(lines):
                tw = draw.textlength(line, font=font)
                x = int((w - tw) // 2)
                draw.text((x, y + i * line_h), line, font=font, fill=fg_color)
            img_path = build / f"scene_{idx:03d}.png"
            img.save(img_path)
            clip_path = build / f"scene_{idx:03d}.mp4"
            # Loop the still image for the scene's duration.
            cmd = [
                ff,
                "-y",
                "-loop", "1",
                "-t", f"{seg_dur:.3f}",
                "-i", str(img_path),
                "-r", str(fps),
                "-pix_fmt", "yuv420p",
                "-c:v", "libx264",
                str(clip_path),
            ]
            subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            scene_files.append(clip_path)

    # concat using filter_complex for robustness
    abs_files = [str(p.resolve()) for p in scene_files]
    if len(abs_files) == 1:
        shutil.copy2(abs_files[0], out_video)
    else:
        cmd_concat = [ff, "-y"]
        for fpath in abs_files:
            cmd_concat += ["-i", fpath]
        filter_arg = f"concat=n={len(abs_files)}:v=1:a=0"
        cmd_concat += [
            "-filter_complex", filter_arg,
            "-pix_fmt", "yuv420p",
            "-r", str(fps),
            "-c:v", "libx264",
            str(out_video),
        ]
        subprocess.run(cmd_concat, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)


def _env_api_key(provider: str) -> str | None:
    if provider == "deepseek":
        return os.getenv("DEEPSEEK_API_KEY")
    if provider == "minimax":
        return os.getenv("MINIMAX_API_KEY")
    if provider == "gemini":
        return os.getenv("GEMINI_API_KEY")
    return None


def _resolve_api_key(provider: str, cfg: dict | None = None) -> str | None:
    """Resolve an API key: YAML config first, then environment variables.

    Config lookup order: the ``api_keys`` mapping (lower-cased then raw
    provider name), then ``<provider>_api_key`` / ``api_key`` / ``key``
    top-level fields. Falls back to the provider's env var.
    """
    lowered = (provider or "").lower()
    # 1) YAML config: api_keys mapping or provider-specific key fields
    if isinstance(cfg, dict) and cfg:
        try:
            api_keys = cfg.get("api_keys")
            if isinstance(api_keys, dict):
                found = api_keys.get(lowered) or api_keys.get(provider)
                if found:
                    return found
        except Exception:
            pass
        for field in (f"{lowered}_api_key", "api_key", "key"):
            try:
                value = cfg.get(field)
            except Exception:
                continue
            if value:
                return value
    # 2) .env / environment
    return _env_api_key(lowered)


def _http_post_json(url: str, payload: dict, headers: dict, retries: int = 2, backoff: float = 1.0) -> dict | None:
    """POST *payload* as JSON and decode the JSON response body.

    Makes up to ``retries + 1`` attempts with exponential backoff
    (``backoff * 2**attempt`` seconds between attempts). Logs the last
    error and returns None when every attempt fails.
    """
    last_err = None
    for attempt in range(retries + 1):
        try:
            body = json.dumps(payload).encode("utf-8")
            req = urllib.request.Request(url, data=body, headers=headers, method="POST")
            with urllib.request.urlopen(req, timeout=120) as resp:
                text = resp.read().decode("utf-8", errors="ignore")
                return json.loads(text)
        except Exception as e:
            last_err = e
            if attempt < retries:
                time.sleep(backoff * (2 ** attempt))
    LOGGER.error(f"HTTP POST failed: {last_err}")
    return None


def _llm_prompt_for_manim(scenes, captions_text: str | None, resolution: str, fps: int) -> str:
    """Build the Manim-script-generation prompt for the LLM.

    Prefers the project's PromptManager (few-shot); when that package is
    unavailable, falls back to a simple inline prompt listing the scenes
    and optional captions.
    """
    try:
        from prompts.templates import PromptManager
        manager = PromptManager()
        messages = manager.get_manim_generation_prompt(
            scenes,
            captions_text or "",
            resolution,
            fps,
            use_few_shot=True
        )
        # Flatten the chat messages into a single "ROLE: content" prompt string.
        return "\n\n".join([f"{m['role'].upper()}: {m['content']}" for m in messages])
    except ImportError:
        # Prompt manager unavailable: fall back to a simple inline prompt.
        lines = []
        lines.append("你是一名 Manim 动画工程师。根据给定的场景信息与字幕，生成一个 Manim 脚本。")
        lines.append("要求：")
        lines.append("1. 必须输出完整的 Python 代码，且包含 `from manim import *` 和 `class Generated(Scene)`，在 `construct` 中实现。")
        lines.append("2. 禁止任何网络/文件写入操作，不要读取外部资源。")
        lines.append("3. 使用 Text/MathTex 等基础元素，演示逐场景的呈现与过渡。")
        lines.append(f"4. 目标视频参数：分辨率 {resolution}，FPS {fps}。")
        lines.append("")
        lines.append("场景信息(JSON 简述)：")
        for s in scenes:
            title = (s.get("title") or "").strip()
            narration = (s.get("narration") or "").strip()
            lines.append(f"- 标题: {title} / 台词: {narration}")
        if captions_text:
            lines.append("")
            lines.append("字幕(SRT)：")
            lines.append(captions_text)
        return "\n".join(lines)


def _llm_generate_manim_script(provider: str, model: str, api_base: str | None, api_key: str | None, prompt: str) -> str | None:
    """Ask an LLM for a Manim script and return the code text.

    Supported providers: "deepseek" (has a default base URL) and
    "minimax" / "gemini" (require an explicit OpenAI-compatible *api_base*).
    Returns the script with surrounding markdown code fences stripped, or
    None on any failure (missing key, unsupported provider, HTTP error,
    empty content).

    Fix: the two provider branches previously duplicated the whole
    request/parse flow; they now share one path.
    """
    if not provider or provider == "none":
        return None
    if not api_key:
        return None
    provider = provider.lower()
    if provider == "deepseek":
        base = api_base.strip() if api_base else "https://api.deepseek.com"
        default_model = "deepseek-chat"
    elif provider in {"minimax", "gemini"}:
        # These adapters expect a user-supplied OpenAI-compatible endpoint.
        if not api_base:
            return None
        base = api_base
        default_model = "gpt-4o" if provider == "gemini" else "minimax-m2"
    else:
        return None
    url = base.rstrip("/") + "/v1/chat/completions"
    payload = {
        "model": model or default_model,
        "messages": [
            {"role": "system", "content": "你只需输出 Python 代码，不要任何解释。"},
            {"role": "user", "content": prompt},
        ],
        "temperature": 0.3,
    }
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
    }
    data = _http_post_json(url, payload, headers)
    if not data:
        return None
    try:
        content = data["choices"][0]["message"]["content"]
    except Exception:
        return None
    if not content:
        return None
    # strip code fences if any
    return re.sub(r"^```(?:python)?\n|\n```$", "", content.strip())


def _is_manim_script_safe(code: str) -> tuple[bool, str]:
    allow_modules = {"manim", "math", "random"}
    deny_modules = {
        "os", "sys", "subprocess", "socket", "http", "urllib", "requests", "shutil", "pathlib", "builtins",
    }
    deny_calls = {"open", "eval", "exec", "__import__"}
    try:
        tree = ast.parse(code)
    except Exception as e:
        return False, f"parse error: {e}"
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for a in node.names:
                root = (a.name or "").split(".")[0]
                if root in deny_modules:
                    return False, f"deny import: {root}"
                if root not in allow_modules:
                    # allow manim submodules implicitly
                    pass
        elif isinstance(node, ast.ImportFrom):
            root = (node.module or "").split(".")[0]
            if root in deny_modules:
                return False, f"deny from-import: {root}"
        elif isinstance(node, ast.Call):
            # direct call name
            fn = node.func
            if isinstance(fn, ast.Name) and fn.id in deny_calls:
                return False, f"deny call: {fn.id}"
            if isinstance(fn, ast.Attribute):
                attr = fn.attr
                if attr in {"system", "popen", "run", "remove", "rmtree", "unlink"}:
                    return False, f"deny attr call: {attr}"
    return True, ""


def _parse_srt(srt_path: Path):
    with srt_path.open("r", encoding="utf-8") as f:
        content = f.read()
    blocks = [b.strip() for b in content.split("\n\n") if b.strip()]
    items = []
    for b in blocks:
        lines = b.splitlines()
        if len(lines) < 2:
            continue
        idx_line = lines[0].strip()
        time_line = lines[1].strip()
        m = re.match(r"(\d\d:\d\d:\d\d,\d\d\d)\s*-->\s*(\d\d:\d\d:\d\d,\d\d\d)", time_line)
        if not m:
            continue
        def parse_ts(ts):
            h, m_, s_ms = ts.split(":", 2)
            s, ms = s_ms.split(",", 1)
            return int(h)*3600 + int(m_)*60 + int(s) + int(ms)/1000.0
        start = parse_ts(m.group(1))
        end = parse_ts(m.group(2))
        text = "\n".join(lines[2:]).strip()
        try:
            idx = int(idx_line)
        except Exception:
            idx = None
        items.append({"idx": idx, "start": start, "end": end, "text": text})
    return items


def _write_srt(items, out_path: Path):
    def fmt(t):
        h = int(t // 3600)
        m_ = int((t % 3600) // 60)
        s = int(t % 60)
        ms = int(round((t - int(t)) * 1000))
        return f"{h:02d}:{m_:02d}:{s:02d},{ms:03d}"
    with out_path.open("w", encoding="utf-8") as f:
        for i, it in enumerate(items, 1):
            f.write(f"{i}\n")
            f.write(f"{fmt(max(0.0,it['start']))} --> {fmt(max(it['start']+0.01, it['end']))}\n")
            f.write((it.get("text") or "") + "\n\n")


def _align_srt_to_silence(audio_path: Path, srt_in: Path, srt_out: Path):
    """Normalize SRT timings against the audio track.

    Enforces monotonically increasing cues, a minimum cue length of 0.2 s,
    and clamps ends to the audio duration when it is known. Despite the
    name, no silence detection is performed — this is a simple monotonic
    clamp. When the input has no parseable cues, it is copied unchanged.
    """
    duration = get_audio_duration(audio_path) or 0
    cues = _parse_srt(srt_in)
    if not cues:
        shutil.copy2(srt_in, srt_out)
        return
    normalized = []
    previous_end = 0.0
    for cue in cues:
        begin = max(previous_end, max(0.0, cue["start"]))
        finish = max(begin + 0.2, cue["end"])
        if duration and finish > duration:
            finish = duration
        previous_end = finish
        normalized.append({"start": begin, "end": finish, "text": cue.get("text")})
    _write_srt(normalized, srt_out)


def _validate_scenes_with_schema(data: dict) -> tuple[bool, list[str]]:
    """使用 Schema 验证场景数据"""
    try:
        from schemas.scenes_schema import validate_scenes_json
        return validate_scenes_json(data, strict=True)
    except ImportError:
        # 如果验证器不可用，跳过验证
        return True, []


def main():
    """Pipeline entry point: scenes -> TTS audio -> subtitles -> video -> mux.

    Reads a scenes JSON file or a Markdown document, synthesizes narration
    with TTS, generates (and optionally aligns) SRT captions, renders video
    via Manim / LLM-generated Manim / text fallback / solid color, then
    muxes audio (and optionally subtitles) into final.mp4.

    Exits with status 2 on fatal errors, 0 on success or --dry-run.
    """
    # Load .env first so provider API keys are available.
    try:
        load_dotenv()
    except Exception:
        pass

    parser = argparse.ArgumentParser()
    parser.add_argument("input_path")
    parser.add_argument("--config", default="")
    parser.add_argument("--provider", default="none")
    parser.add_argument("--model", default="")
    parser.add_argument("--api-base", default="")
    parser.add_argument("--use-llm-manim", action="store_true")
    parser.add_argument("--use-llm-scenes", action="store_true")
    parser.add_argument("--md-scene-level", type=int, default=2)
    parser.add_argument("--voice", default="zh-CN-XiaoxiaoNeural")
    parser.add_argument("--rate", default="+0%")
    parser.add_argument("--pitch", default="+0Hz")
    parser.add_argument("--resolution", default="1280x720")
    parser.add_argument("--fps", type=int, default=30)
    parser.add_argument("--outdir", default="outputs")
    parser.add_argument("--no-manim", action="store_true")
    parser.add_argument("--overwrite", action="store_true")
    parser.add_argument("--embed-subtitles", action="store_true")
    parser.add_argument("--burn-subtitles", action="store_true")
    parser.add_argument("--align-subs", action="store_true")
    parser.add_argument("--theme", default="dark")
    parser.add_argument("--dry-run", action="store_true")
    parser.add_argument("--cache", action="store_true", help="启用缓存")
    parser.add_argument("--cache-dir", default="", help="缓存目录（默认: outputs/.cache）")
    parser.add_argument("--clear-cache", action="store_true", help="清除缓存并退出")
    args = parser.parse_args()

    in_path = Path(args.input_path)
    out_dir = Path(args.outdir)
    out_dir.mkdir(parents=True, exist_ok=True)

    # Load configuration exactly once: --config (or ./config.yaml) overlaid
    # with ./config.local.yaml, where local values win. Previously config
    # was loaded twice from conflicting sources and the first result was
    # silently discarded by the second load.
    cfg = None
    cfg_path = Path(args.config) if args.config else (Path.cwd() / "config.yaml")
    if cfg_path.exists():
        if yaml is not None:
            try:
                with cfg_path.open("r", encoding="utf-8") as f:
                    cfg = yaml.safe_load(f) or {}
            except Exception as e:
                LOGGER.warning(f"无法加载配置文件: {e}")
                cfg = None
        elif args.config:
            LOGGER.warning("YAML模块不可用，将忽略配置文件")
    cfg_local = None
    cfg_local_path = Path.cwd() / "config.local.yaml"
    if cfg_local_path.exists() and yaml is not None:
        try:
            with cfg_local_path.open("r", encoding="utf-8") as f:
                cfg_local = yaml.safe_load(f) or {}
        except Exception:
            cfg_local = None
    # Shallow merge: config.local.yaml overrides config.yaml.
    if isinstance(cfg, dict) or isinstance(cfg_local, dict):
        merged = {}
        if isinstance(cfg, dict):
            merged.update(cfg)
        if isinstance(cfg_local, dict):
            merged.update(cfg_local)
        cfg = merged

    # Initialize the cache manager: enabled via --cache or a truthy
    # "cache" key in the merged config.
    cache_enabled = args.cache
    if not cache_enabled and isinstance(cfg, dict):
        cache_enabled = cfg.get("cache", False)
    cache_dir = args.cache_dir if args.cache_dir else (out_dir / ".cache")
    pipeline_cache = None
    if cache_enabled:
        try:
            from utils.cache import PipelineCache
            pipeline_cache = PipelineCache(Path(cache_dir), enabled=True)
            LOGGER.info(f"缓存已启用: {cache_dir}")
        except ImportError:
            LOGGER.warning("缓存模块不可用，将禁用缓存")
            pipeline_cache = None
    else:
        LOGGER.info("缓存已禁用")

    # Handle the clear-cache request, then exit early.
    if args.clear_cache:
        if pipeline_cache:
            pipeline_cache.invalidate()
            LOGGER.info("缓存已清除")
        else:
            LOGGER.info("缓存未启用")
        sys.exit(0)

    # Apply config values only for args still at their CLI defaults, so
    # explicit command-line flags always win over config files.
    def apply_cfg(key, default_attr, attr_name=None):
        if cfg is None:
            return
        k = key
        a = attr_name or default_attr
        defaults = {
            "provider": "none",
            "model": "",
            "api_base": "",
            "voice": "zh-CN-XiaoxiaoNeural",
            "rate": "+0%",
            "pitch": "+0Hz",
            "resolution": "1280x720",
            "fps": 30,
            "outdir": "outputs",
            "no_manim": False,
            "overwrite": False,
            "embed_subtitles": False,
            "burn_subtitles": False,
            "align_subs": False,
            "theme": "dark",
            "use_llm_manim": False,
            "use_llm_scenes": False,
            "md_scene_level": 2,
        }
        if k in cfg and hasattr(args, a) and getattr(args, a) == defaults.get(a, None):
            setattr(args, a, cfg[k])

    if cfg:
        apply_cfg("provider", "provider")
        apply_cfg("model", "model")
        apply_cfg("api_base", "api_base", attr_name="api_base")
        apply_cfg("voice", "voice")
        apply_cfg("rate", "rate")
        apply_cfg("pitch", "pitch")
        apply_cfg("resolution", "resolution")
        apply_cfg("fps", "fps")
        apply_cfg("outdir", "outdir")
        apply_cfg("no_manim", "no_manim")
        apply_cfg("overwrite", "overwrite")
        apply_cfg("embed_subtitles", "embed_subtitles")
        apply_cfg("burn_subtitles", "burn_subtitles")
        apply_cfg("align_subs", "align_subs")
        apply_cfg("theme", "theme")
        apply_cfg("use_llm_manim", "use_llm_manim")
        apply_cfg("use_llm_scenes", "use_llm_scenes")
        apply_cfg("md_scene_level", "md_scene_level")

    if not in_path.exists():
        LOGGER.error("input_path not found")
        sys.exit(2)

    # Parse input into (data, scenes): JSON directly, Markdown either via
    # LLM scene generation (when enabled and validated) or the local parser.
    if in_path.suffix.lower() == ".json":
        data, scenes = load_scenes(in_path)
    elif in_path.suffix.lower() == ".md":
        md_text = in_path.read_text(encoding="utf-8")
        used_llm = False
        if args.use_llm_scenes and args.provider and args.provider != "none":
            api_key = _resolve_api_key(args.provider, cfg)
            if api_key:
                obj = _llm_generate_scenes(args.provider, args.model, args.api_base, api_key, md_text)
                if obj and _validate_scenes_obj(obj):
                    data = obj
                    scenes = obj.get("scenes", [])
                    used_llm = True
        if not used_llm:
            data, scenes = parse_markdown_to_scenes(md_text, scene_level=args.md_scene_level)
            # Auto-adjust for typical docs that use ### as scene titles.
            if not scenes and "###" in md_text:
                data, scenes = parse_markdown_to_scenes(md_text, scene_level=3)
        # Export scenes.json for inspection (best-effort).
        try:
            (out_dir / "scenes.json").write_text(json.dumps(data, ensure_ascii=False, indent=2), encoding="utf-8")
        except Exception:
            pass
    else:
        LOGGER.error("unsupported input file type")
        sys.exit(2)

    # Validate scene data against the schema (non-fatal on failure).
    is_valid, schema_errors = _validate_scenes_with_schema(data)
    if not is_valid:
        LOGGER.warning("场景数据 Schema 验证失败:")
        for error in schema_errors:
            LOGGER.warning(f"  - {error}")
        LOGGER.warning("将尝试继续执行，但可能遇到问题")
    else:
        LOGGER.info("场景数据 Schema 验证通过")

    narration_all = concat_narration(scenes)
    if not narration_all:
        LOGGER.error("no narration text found in scenes")
        sys.exit(2)

    audio_path = out_dir / "audio.mp3"
    srt_path = out_dir / "captions.srt"
    resolution = parse_resolution(args.resolution)

    if args.dry_run:
        LOGGER.info("dry-run: skip TTS and rendering")
        sys.exit(0)

    # Try the audio cache first (unless --overwrite forces regeneration).
    audio_cached = False
    if pipeline_cache and not args.overwrite:
        audio_cached = pipeline_cache.get_cached_audio(
            narration_all, args.voice, args.rate, args.pitch, audio_path
        )
        if audio_cached:
            LOGGER.info("从缓存加载音频")

    if not audio_cached and (args.overwrite or not audio_path.exists()):
        try:
            synthesize_tts(narration_all, args.voice, args.rate, args.pitch, audio_path)
            # Store the freshly synthesized audio in the cache.
            if pipeline_cache:
                pipeline_cache.cache_audio(
                    narration_all, args.voice, args.rate, args.pitch, audio_path
                )
        except Exception as e:
            LOGGER.error(f"TTS failed: {e}")
            sys.exit(2)
    elif not audio_cached:
        LOGGER.info("音频文件已存在，跳过TTS（使用 --overwrite 重新生成）")

    duration = get_audio_duration(audio_path)
    if duration is None:
        LOGGER.error("failed to detect audio duration; please install mutagen or ensure ffmpeg/ffprobe is available")
        sys.exit(2)
    if args.overwrite or not srt_path.exists():
        write_srt_by_proportion(narration_all, duration, srt_path)
    else:
        LOGGER.info("captions.srt exists, skip SRT generation (use --overwrite to regenerate)")

    # Optional: align subtitles (monotonicity / duration clamping).
    if args.align_subs and srt_path.exists():
        try:
            aligned = out_dir / "captions_aligned.srt"
            _align_srt_to_silence(audio_path, srt_path, aligned)
            if aligned.exists():
                srt_path = aligned
                LOGGER.info("captions aligned to silence segments")
        except Exception as e:
            LOGGER.warning(f"align subtitles failed: {e}")

    build_dir = out_dir / "manim_build"
    video_no_audio = out_dir / "video_no_audio.mp4"
    ok = False
    # Render path 1: LLM-generated Manim script (safety-checked first).
    if not args.no_manim and args.use_llm_manim and args.provider and args.provider != "none":
        try:
            captions_text = srt_path.read_text(encoding="utf-8") if srt_path.exists() else None
            prompt = _llm_prompt_for_manim(scenes, captions_text, args.resolution, args.fps)
            api_key = _resolve_api_key(args.provider, cfg)
            script_txt = _llm_generate_manim_script(args.provider, args.model, args.api_base, api_key, prompt)
            # "class Generated(Scene)" is a substring of this check too.
            if script_txt and "class Generated" in script_txt:
                safe, reason = _is_manim_script_safe(script_txt)
                if not safe:
                    LOGGER.warning(f"LLM script failed safety check: {reason}. Skipping LLM render.")
                    raise RuntimeError("unsafe llm script")
                build_dir.mkdir(parents=True, exist_ok=True)
                llm_script = build_dir / "manim_script_llm.py"
                llm_script.write_text(script_txt, encoding="utf-8")
                cmd = [
                    "manim",
                    "-r", args.resolution.replace("x", ","),
                    "--fps", str(args.fps),
                    "-o", "video_no_audio",
                    llm_script.name,
                    "Generated",
                ]
                try:
                    subprocess.run(cmd, cwd=build_dir, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    # Copy the first matching rendered mp4 out of the build tree.
                    cand = None
                    for p in build_dir.rglob("*.mp4"):
                        if p.name.startswith("video_no_audio"):
                            cand = p
                            break
                    if cand:
                        video_no_audio.parent.mkdir(parents=True, exist_ok=True)
                        shutil.copy2(cand, video_no_audio)
                        ok = True
                except Exception:
                    ok = False
        except Exception:
            ok = False
    # Render path 2: deterministic Manim render from scenes.
    if not ok and not args.no_manim:
        if args.overwrite or not video_no_audio.exists():
            ok = try_render_manim(scenes, args.resolution, args.fps, build_dir, video_no_audio)
        else:
            LOGGER.info("video_no_audio exists, skip Manim render (use --overwrite or --no-manim)")
            ok = True
    # Render path 3: text fallback, then solid color as the last resort.
    if not ok:
        try:
            if args.overwrite or not video_no_audio.exists():
                render_fallback_text(scenes, duration, resolution, args.fps, video_no_audio, theme=args.theme)
            else:
                LOGGER.info("video_no_audio exists, skip fallback render (use --overwrite)")
        except Exception as e:
            LOGGER.warning(f"Text fallback failed: {e}. Falling back to solid color.")
            try:
                if args.overwrite or not video_no_audio.exists():
                    render_fallback_blank(duration, resolution, args.fps, video_no_audio)
                else:
                    LOGGER.info("video_no_audio exists, skip solid color render (use --overwrite)")
            except Exception as e2:
                LOGGER.error(f"Render failed: {e2}")
                sys.exit(2)

    # Mux audio (and optionally subtitles) into the final output.
    final_path = out_dir / "final.mp4"
    try:
        if args.overwrite or not final_path.exists():
            mux_audio(video_no_audio, audio_path, final_path, args.fps, srt_path, embed_subs=args.embed_subtitles, burn_subs=args.burn_subtitles)
        else:
            LOGGER.info("final.mp4 exists, skip mux (use --overwrite to regenerate)")
    except Exception as e:
        LOGGER.error(f"Mux failed: {e}")
        sys.exit(2)

    LOGGER.info(str(final_path))


if __name__ == "__main__":
    # Script entry point: run the full scenes-to-video pipeline.
    main()
