﻿import os
import sys
import json
import glob
import textwrap
import subprocess
from pathlib import Path
import hashlib
import time
from datetime import datetime
from typing import List, Dict, Optional, Tuple


def info(msg: str):
    """Print an informational message, prefixed with [INFO], to stdout."""
    print("[INFO]", msg)


def warn(msg: str):
    """Print a warning message, prefixed with [WARN], to stdout."""
    print("[WARN]", msg)


def error(msg: str):
    """Print an error message, prefixed with [ERROR], to stderr."""
    print("[ERROR]", msg, file=sys.stderr)


def run(cmd: List[str], cwd: Optional[str] = None, check: bool = True) -> subprocess.CompletedProcess:
    """Echo *cmd* via info() and execute it, capturing decoded text output.

    Raises subprocess.CalledProcessError on a non-zero exit when check=True.
    """
    info("$ " + " ".join(cmd))
    return subprocess.run(
        cmd,
        cwd=cwd,
        check=check,
        text=True,
        capture_output=True,
        encoding="utf-8",
        errors="replace",
    )


def run_codex_exec(prompt: str, codex_bin: Optional[str] = None) -> str:
    """
    Call `codex exec` with the given prompt and return its stdout.

    The prompt is written to a temporary UTF-8 file and delivered via stdin so
    large prompts never exceed OS command-line length limits (notably on
    Windows). Invocation strategies are attempted in order until one exits 0:

    1. Resolved binary, stdin from the temp file (with a trailing '-' to
       signal stdin, then without it).
    2. Shell piping/redirection (`type file | codex ...` under cmd.exe on
       Windows, `codex ... < file` under bash elsewhere).
    3. When no binary could be resolved, the same shell invocations relying
       on PATH lookup.

    Args:
        prompt: Prompt text to send to Codex.
        codex_bin: Optional explicit binary name/path, tried before the
            default `codex` (and Windows launcher variants) on PATH.

    Returns:
        Captured stdout of the first successful invocation.

    Raises:
        RuntimeError: if every invocation strategy fails.
    """
    import shutil, platform, shlex, tempfile
    system = platform.system().lower()

    # Resolve the codex binary; an explicit override wins.
    candidates = []
    if codex_bin:
        candidates.append(codex_bin)
    # Try common Windows launcher extensions as well as the bare name.
    candidates.extend(["codex", "codex.cmd", "codex.exe"])

    resolved: Optional[str] = None
    for name in candidates:
        path = shutil.which(name)
        if path:
            resolved = path
            break

    # Write the prompt to a temp file to avoid command-line length limits.
    tmp_path = None
    try:
        with tempfile.NamedTemporaryFile("w", encoding="utf-8", delete=False, suffix=".txt") as tf:
            tf.write(prompt)
            tmp_path = tf.name

        if resolved:
            # Strategy 1: stdin with an explicit '-' to signal stdin.
            try:
                with open(tmp_path, "r", encoding="utf-8") as fin:
                    proc0 = subprocess.run([resolved, "exec", "--skip-git-repo-check", "-"], stdin=fin, text=True, capture_output=True, encoding="utf-8", errors="replace")
                if proc0.returncode == 0:
                    return proc0.stdout
            except Exception:
                pass

            # Strategy 1b: stdin without the '-' marker.
            try:
                with open(tmp_path, "r", encoding="utf-8") as fin:
                    proc = subprocess.run([resolved, "exec", "--skip-git-repo-check"], stdin=fin, text=True, capture_output=True, encoding="utf-8", errors="replace")
                if proc.returncode == 0:
                    return proc.stdout
            except Exception:
                pass

            # Never pass the whole prompt as a single argv element (Windows
            # command-line length limit); pipe the temp file through a shell.
            if "windows" in system:
                cmdline = f'type "{tmp_path}" | "{resolved}" exec --skip-git-repo-check -'
                proc2 = subprocess.run(cmdline, text=True, capture_output=True, shell=True, encoding="utf-8", errors="replace")
                if proc2.returncode == 0:
                    return proc2.stdout
                # Retry without '-'
                cmdline2 = f'type "{tmp_path}" | "{resolved}" exec --skip-git-repo-check'
                proc3 = subprocess.run(cmdline2, text=True, capture_output=True, shell=True, encoding="utf-8", errors="replace")
                if proc3.returncode == 0:
                    return proc3.stdout
                raise RuntimeError(f"codex exec failed via stdin on Windows: {(proc3.stderr or proc2.stderr).strip()}")
            else:
                # Non-Windows: bash redirects stdin from the file. Paths are
                # shell-quoted so spaces/quotes cannot break the command, and
                # the resolved binary is reused instead of re-querying PATH.
                qbin = shlex.quote(resolved)
                qtmp = shlex.quote(tmp_path)
                proc2 = subprocess.run(["bash", "-lc", f"{qbin} exec --skip-git-repo-check - < {qtmp}"], text=True, capture_output=True, encoding="utf-8", errors="replace")
                if proc2.returncode == 0:
                    return proc2.stdout
                proc3 = subprocess.run(["bash", "-lc", f"{qbin} exec --skip-git-repo-check < {qtmp}"], text=True, capture_output=True, encoding="utf-8", errors="replace")
                if proc3.returncode == 0:
                    return proc3.stdout
                raise RuntimeError(f"codex exec failed via stdin on bash: {(proc3.stderr or proc2.stderr).strip()}")

        # Binary not resolved: still attempt shell invocation relying on PATH.
        if "windows" in system:
            cmdline = f'type "{tmp_path}" | codex exec --skip-git-repo-check -'
            proc = subprocess.run(cmdline, text=True, capture_output=True, shell=True, encoding="utf-8", errors="replace")
            if proc.returncode == 0:
                return proc.stdout
            cmdline2 = f'type "{tmp_path}" | codex exec --skip-git-repo-check'
            proc2 = subprocess.run(cmdline2, text=True, capture_output=True, shell=True, encoding="utf-8", errors="replace")
            if proc2.returncode == 0:
                return proc2.stdout
            raise RuntimeError(f"codex exec failed via cmd PATH: {(proc2.stderr or proc.stderr).strip()}")

        # Non-Windows PATH case.
        qtmp = shlex.quote(tmp_path)
        proc = subprocess.run(["bash", "-lc", f"codex exec --skip-git-repo-check - < {qtmp}"], text=True, capture_output=True, encoding="utf-8", errors="replace")
        if proc.returncode == 0:
            return proc.stdout
        proc2 = subprocess.run(["bash", "-lc", f"codex exec --skip-git-repo-check < {qtmp}"], text=True, capture_output=True, encoding="utf-8", errors="replace")
        if proc2.returncode == 0:
            return proc2.stdout
        raise RuntimeError(f"codex exec failed via bash PATH: {proc2.stderr.strip() or proc.stderr.strip()}")
    finally:
        # Best-effort cleanup of the temp prompt file.
        if tmp_path:
            try:
                os.unlink(tmp_path)
            except Exception:
                pass


def read_text(path: Path, limit_chars: Optional[int] = None) -> str:
    """Read a UTF-8 text file (ignoring decode errors), optionally truncated.

    When limit_chars is set and the content exceeds it, returns the first
    limit_chars characters followed by a truncation marker.
    """
    content = path.read_text(encoding="utf-8", errors="ignore")
    if not limit_chars or len(content) <= limit_chars:
        return content
    return content[:limit_chars] + "\n\n...[truncated]...\n"


def ensure_dir(p: Path):
    """Create directory *p* including parents; a no-op when it already exists."""
    Path(p).mkdir(parents=True, exist_ok=True)


class PipelineConfig:
    """Configuration bag for the auto-blog pipeline.

    Constructor arguments are stored 1:1 as instance attributes. The API
    keys and the WeCom webhook fall back to environment variables when not
    passed explicitly: GEMINI_API_KEY, CODEX_BIN, AIHUBMIX_API_KEY,
    SERPER_API_KEY, WECOM_WEBHOOK.

    Note: when a ``query`` is set, AutoBlogPipeline.__init__ rewrites
    ``links_file``, ``output_dir`` and ``articles_dir`` to live inside a
    hash-named run folder under ``runs_root``.
    """
    def __init__(
        self,
        links_file: str = "./links.txt",
        crawler_script: str = "crwaler_links.py",
        articles_dir: str = "articles",
        output_dir: str = "output",
        articles_out: Optional[str] = None,
        browser: str = "chromium",
        skip_crawl: bool = False,
        start: Optional[int] = None,
        end: Optional[int] = None,
        anchors_file: Optional[str] = None,
        eeat_profile: Optional[str] = None,
        gemini_api_key: Optional[str] = None,
        codex_bin: Optional[str] = None,
        # Integrated search + hashing
        query: Optional[str] = None,
        theme: Optional[str] = None,
        runs_root: str = "runs",
        hash_algo: str = "sha1",
        hash_len: int = 12,
        aihubmix_api_key: Optional[str] = None,
        serper_api_key: Optional[str] = None,
        search_delay: float = 1.0,
        max_links: Optional[int] = None,
        # Direct composition options
        compose_direct: bool = False,
        max_chars_per_source: int = 15000,
        # Optional anchor injection via CLI
        anchor_text: Optional[str] = None,
        anchor_url: Optional[str] = None,
        # Crawler controls
        crawler_timeout: int = 60,
        crawler_settle: float = 1.0,
        crawler_workers: int = 1,
        crawler_headless: bool = False,
        crawler_page_load_strategy: str = "normal",
        crawler_user_data_dir: Optional[str] = None,
        crawler_raw_html_out: Optional[str] = None,
        summary_workers: int = 3,
        # WeCom sending
        wecom_send: bool = False,
        wecom_webhook: Optional[str] = None,
    ):
        # Input/output locations and crawl toggles
        self.links_file = links_file
        self.crawler_script = crawler_script
        self.articles_dir = articles_dir
        self.output_dir = output_dir
        self.articles_out = articles_out
        self.browser = browser
        self.skip_crawl = skip_crawl
        self.start = start
        self.end = end
        self.anchors_file = anchors_file
        self.eeat_profile = eeat_profile
        # Credentials: explicit argument wins, environment variable is the fallback
        self.gemini_api_key = gemini_api_key or os.environ.get("GEMINI_API_KEY")
        self.codex_bin = codex_bin or os.environ.get("CODEX_BIN")
        # Integrated search + run-folder hashing
        self.query = query
        self.theme = theme
        self.runs_root = runs_root
        self.hash_algo = hash_algo
        self.hash_len = hash_len
        self.aihubmix_api_key = aihubmix_api_key or os.environ.get("AIHUBMIX_API_KEY")
        self.serper_api_key = serper_api_key or os.environ.get("SERPER_API_KEY")
        self.search_delay = search_delay
        self.max_links = max_links
        # Direct composition options
        self.compose_direct = compose_direct
        self.max_chars_per_source = max_chars_per_source
        # Anchor injection (CLI-preferred)
        self.anchor_text = anchor_text
        self.anchor_url = anchor_url
        # Crawler controls
        self.crawler_timeout = crawler_timeout
        self.crawler_settle = crawler_settle
        self.crawler_workers = crawler_workers
        self.crawler_headless = crawler_headless
        self.crawler_page_load_strategy = crawler_page_load_strategy
        self.crawler_user_data_dir = crawler_user_data_dir
        self.crawler_raw_html_out = crawler_raw_html_out

        self.summary_workers = summary_workers
        # WeCom options
        self.wecom_send = wecom_send
        self.wecom_webhook = wecom_webhook or os.environ.get("WECOM_WEBHOOK")

class AutoBlogPipeline:
    def __init__(self, cfg: PipelineConfig):
        """Wire up run directories and (possibly) repoint cfg paths.

        When cfg.query is set, a stable hash of the query names a dedicated
        run folder under cfg.runs_root, and cfg.links_file / cfg.output_dir
        (and possibly cfg.articles_dir) are rewritten to live inside it.
        """
        self.cfg = cfg
        # A query triggers a hash-named run directory so repeated runs of
        # the same query reuse one folder.
        if cfg.query:
            digest = self._hash_text(cfg.query, algo=cfg.hash_algo, length=cfg.hash_len)
            self.base_dir = Path(cfg.runs_root) / digest
            # Repoint all paths into the run folder
            cfg.links_file = (self.base_dir / "links.txt").as_posix()
            cfg.output_dir = (self.base_dir / "output").as_posix()
        else:
            self.base_dir = Path('.')

        # Articles directory precedence:
        # 1) --articles-out if provided
        # 2) runs/<hash>/articles when --query is provided
        # 3) --articles-dir (default: articles)
        if cfg.articles_out:
            cfg.articles_dir = cfg.articles_out
        elif cfg.query:
            cfg.articles_dir = (self.base_dir / "articles").as_posix()

        self.out_dir = Path(cfg.output_dir)
        self.debug_dir = self.out_dir / "_debug"
        self.images_dir = self.out_dir / "images"
        for directory in (self.out_dir, self.debug_dir, self.images_dir):
            ensure_dir(directory)

    # ----------------------------
    # Quality review before image generation
    # ----------------------------
    def review_article_with_codex(self, md_text: str) -> str:
        """
        Use Codex to audit and correct the article Markdown with these rules:
        1) Remove any lines containing 'tokens used' (case-insensitive) anywhere.
        2) Ensure the article contains no Chinese characters; output must be English only.
        3) Remove brand names from the prose; DO NOT remove or alter the client brand 'mvraki' (any case variants allowed). Do not modify URLs.
        4) Proofread for logical consistency and clarity; fix contradictions and obvious logic issues without inventing facts.

        Returns corrected Markdown. If Codex fails, the original text is used
        instead; in BOTH cases a local regex pass then enforces rules (1) and
        (2) as a safety net (it is not limited to the failure path).
        """
        # Build prompt instructing Codex to output only corrected Markdown
        prompt = textwrap.dedent(f"""
        You are an expert copy editor. Review and correct the following Markdown article.

        Strict requirements:
        - Remove any line that contains 'tokens used' (case-insensitive) anywhere in the document.
        - Ensure there is no Chinese text or punctuation; the output must be entirely in English.
        - Remove brand names (proper nouns that are commercial brands) from the prose, EXCEPT the client brand 'mvraki' which must be preserved as-is. Do not alter 'mvraki' in any case form. Do not modify URLs.
        - Proofread for logic and clarity; fix contradictions and obvious logical errors. Do not invent facts.
        - Preserve the structure (headings, tables, lists if any were present) and the author’s intended meaning.
        - Keep Markdown formatting intact.
        - Output only the corrected Markdown content; no explanations, no metadata, no code fences.

        <article>
        {md_text}
        </article>
        """).strip()

        try:
            out = run_codex_exec(prompt, codex_bin=self.cfg.codex_bin).strip()
            # Strip common code fences if present
            if out.startswith("```") and out.endswith("```"):
                # Remove first and last fence lines
                lines = out.splitlines()
                if len(lines) >= 2:
                    out = "\n".join(lines[1:-1]).strip()
            reviewed = out
        except Exception as e:
            warn(f"Codex review failed: {e}")
            reviewed = md_text

        # Local enforcement of rules (1) and (2). Note: this intentionally
        # runs even when the Codex review succeeded, as a safety net.
        try:
            import re
            # 1) Remove any 'tokens used' lines anywhere
            reviewed = re.sub(r"(?im)^.*tokens\s*used.*$\n?", "", reviewed)
            # 2) Remove Chinese (CJK) characters and full-width punctuation
            reviewed = re.sub(r"[\u3000-\u303F\u3400-\u9FFF\uF900-\uFAFF\uFF00-\uFFEF]", "", reviewed)
        except Exception:
            pass

        return reviewed

    @staticmethod
    def _strip_preamble_before_first_h1(md_text: str) -> str:
        """
        Remove any leading content before the first Markdown H1 heading.
        H1 is defined as a line matching: ^\s*#\s+\S
        If no H1 is found, return original text.
        """
        import re
        lines = (md_text or "").splitlines()
        idx = None
        for i, ln in enumerate(lines):
            if re.match(r"^\s*#\s+\S", ln.lstrip('\ufeff')):
                idx = i
                break
        if idx is None:
            return md_text
        out = "\n".join(lines[idx:])
        if not out.endswith("\n"):
            out += "\n"
        return out

    # ----------------------------
    # Utilities: image link rewrite/copy
    # ----------------------------
    def _rewrite_images_to_output(self, md_path: Path) -> int:
        """
        Copy local image files referenced in markdown into output/images and
        rewrite links to relative paths (images/<filename>).

        Returns number of images rewritten.
        """
        try:
            text = md_path.read_text(encoding="utf-8", errors="ignore")
        except Exception as e:
            warn(f"Failed to read markdown for image rewrite: {e}")
            return 0

        import re, shutil
        # Regex for markdown images: ![alt](path "title") or ![](path)
        # Allow spaces in paths; capture optional quoted title
        img_re = re.compile(r"!\[[^\]]*\]\(([^)]+?)(?:\s+(?:\&quot;[^\&]*\&quot;|\"[^\"]*\"))?\)")

        matches = list(img_re.finditer(text))
        if not matches:
            return 0

        rewrites: Dict[str, str] = {}
        copied = 0
        for m in matches:
            src = m.group(1).strip()
            if not src or src.lower().startswith("http://") or src.lower().startswith("https://"):
                continue
            # Normalize path separators
            src_norm = src.replace("\\", "/")
            # If already pointing to output/images, skip
            if src_norm.startswith("images/") or src_norm.startswith("./images/"):
                continue
            src_path = Path(src_norm)
            if not src_path.is_absolute():
                # resolve relative to the markdown's folder first
                cand1 = (md_path.parent / src_path)
                if cand1.exists():
                    src_path = cand1
                else:
                    src_path = src_path.resolve()
            if not src_path.exists() or not src_path.is_file():
                continue
            dest_name = src_path.name
            dest_path = self.images_dir / dest_name
            # Ensure unique filename if collision
            if dest_path.exists():
                stem = dest_path.stem
                suf = dest_path.suffix
                k = 1
                while dest_path.exists():
                    dest_path = self.images_dir / f"{stem}_{k}{suf}"
                    k += 1
            try:
                shutil.copy2(src_path, dest_path)
                copied += 1
                rewrites[src] = (Path("images") / dest_path.name).as_posix()
            except Exception as e:
                warn(f"Copy image failed: {src_path} -> {dest_path}: {e}")

        if not rewrites:
            return 0

        def _sub(m: re.Match) -> str:
            orig = m.group(1).strip()
            new = rewrites.get(orig)
            if not new:
                # try normalized key
                new = rewrites.get(orig.replace("\\", "/"))
            if not new:
                return m.group(0)
            return m.group(0).replace(m.group(1), new)

        new_text = img_re.sub(_sub, text)
        try:
            md_path.write_text(new_text, encoding="utf-8")
            info(f"Rewrote {len(rewrites)} image link(s) to output/images (copied {copied}).")
        except Exception as e:
            warn(f"Failed to write markdown after image rewrite: {e}")
        return len(rewrites)

    # ----------------------------
    # Utilities: convert markdown to DOCX
    # ----------------------------
    def _convert_markdown_to_docx(self, md_path: Path) -> Optional[Path]:
        """Convert markdown to a .docx file next to it. Returns the .docx path on success.

        Backends are tried in order and the first success wins:
          1) pypandoc (auto-downloading a portable pandoc when the binary is
             missing),
          2) the pandoc CLI found on PATH,
          3) a minimal python-docx renderer (H1-H3 headings, local images and
             plain paragraphs only).
        Returns None when every backend is unavailable or fails.
        """
        docx_path = md_path.with_suffix('.docx')
        md_dir = md_path.parent
        # Try pypandoc first
        try:
            import pypandoc  # type: ignore
            try:
                # Ensure images resolve relative to the markdown file directory and autolink bare URLs
                pypandoc.convert_file(
                    str(md_path),
                    'docx',
                    outputfile=str(docx_path),
                    extra_args=['--from=gfm+autolink_bare_uris', '--resource-path', str(md_dir)],
                )
                info(f"DOCX saved via pypandoc (resource-path set) -> {docx_path.as_posix()}")
                return docx_path
            except OSError:
                # pandoc not found; try to auto-download a portable pandoc
                try:
                    pypandoc.download_pandoc()
                    pypandoc.convert_file(
                        str(md_path),
                        'docx',
                        outputfile=str(docx_path),
                        extra_args=['--from=gfm+autolink_bare_uris', '--resource-path', str(md_dir)],
                    )
                    info(f"DOCX saved via pypandoc (downloaded pandoc, resource-path set) -> {docx_path.as_posix()}")
                    return docx_path
                except Exception as e2:
                    warn(f"pypandoc auto-download failed: {e2}")
        except Exception:
            # pypandoc not installed (or conversion failed); fall through to CLI.
            pass
        # Try pandoc CLI
        try:
            import shutil
            pandoc_bin = shutil.which('pandoc')
            if pandoc_bin:
                # Run with cwd at the markdown directory so relative image paths resolve
                proc = subprocess.run(
                    [pandoc_bin, '-f', 'gfm+autolink_bare_uris', md_path.name, '-o', docx_path.name],
                    cwd=str(md_dir),
                    text=True,
                    capture_output=True,
                    encoding='utf-8',
                    errors='replace',
                )
                if proc.returncode == 0:
                    info(f"DOCX saved via pandoc (cwd set) -> {docx_path.as_posix()}")
                    return docx_path
                else:
                    warn(f"pandoc failed: {proc.stderr.strip()}")
        except Exception as e:
            warn(f"pandoc invocation error: {e}")
        # Try minimal python-docx fallback
        try:
            from docx import Document  # type: ignore
            from docx.shared import Inches  # type: ignore  # NOTE(review): currently unused; kept to avoid behavior drift
            import re
            doc = Document()
            text = md_path.read_text(encoding='utf-8', errors='ignore')
            lines = text.splitlines()
            # Allow spaces in paths; capture optional quoted title
            img_re = re.compile(r"!\[[^\]]*\]\(([^)]+?)(?:\s+\"[^\"]*\")?\)")
            for raw in lines:
                # Strip a possible BOM so heading detection works on line one.
                s = raw.lstrip('\ufeff')
                if s.startswith('# '):
                    doc.add_heading(s[2:].strip(), level=1)
                elif s.startswith('## '):
                    doc.add_heading(s[3:].strip(), level=2)
                elif s.startswith('### '):
                    doc.add_heading(s[4:].strip(), level=3)
                else:
                    m = img_re.search(s)
                    if m:
                        pth = m.group(1).strip().replace('\\', '/')
                        if not (pth.lower().startswith('http://') or pth.lower().startswith('https://')):
                            # Local image: embed it when the file exists.
                            p = (md_path.parent / pth).resolve()
                            if p.exists():
                                try:
                                    doc.add_picture(str(p))
                                except Exception:
                                    # Unsupported image format etc.: keep the raw line.
                                    doc.add_paragraph(s)
                            else:
                                doc.add_paragraph(s)
                        else:
                            # Remote image: leave the markdown line as text.
                            doc.add_paragraph(s)
                    else:
                        doc.add_paragraph(s)
            doc.save(str(docx_path))
            info(f"DOCX saved via python-docx -> {docx_path.as_posix()}")
            return docx_path
        except Exception as e:
            warn(f"python-docx conversion failed: {e}")
        warn("DOCX conversion skipped (no pandoc/pypandoc/python-docx available)")
        return None

    # ----------------------------
    # Step 0.5: SEO meta generation helpers
    # ----------------------------
    def generate_seo_metadata(self, md_text: str, theme: Optional[str] = None) -> Dict[str, str]:
        """
        Ask Codex for an SEO-ready English title and description.

        Constraints (enforced by the prompt and again by local post-processing):
        - Title around 50-60 characters including the core keyword(s)
        - Description around 150-160 characters highlighting the keyword(s)
        - No "Discover"/"Explore", URL handles, emojis, quotes, or brand names

        Returns:
            {"title": str, "description": str}; also dumped to
            _debug/seo_meta.json on a best-effort basis.
        """
        theme_text = theme or ""
        prompt = textwrap.dedent(f"""
        You will craft SEO metadata for a blog article.
        Produce JSON with fields: title, description.
        Requirements:
        - English only
        - Title: 50-60 characters; include the primary keyword(s)
        - Description: 150-160 characters; emphasize the core keyword(s)
        - Avoid the words "Discover" and "Explore"
        - Do not include URL handles, emojis, quotes, site or brand names
        - Use natural, informative phrasing in active voice
        - No trailing punctuation clutter; no pipes or brackets

        If a theme is provided, align the keywords with it:
        THEME: {theme_text}

        Article Markdown:
        {md_text}
        """).strip()
        raw_reply = run_codex_exec(prompt, codex_bin=self.cfg.codex_bin)
        meta = self._extract_json(raw_reply) or {}
        # Post-process the model output to enforce the hard constraints locally.
        title = self._limit_at_word_boundary(
            self._sanitize_meta_text(str(meta.get("title") or "").strip()), 60
        )
        description = self._limit_at_word_boundary(
            self._sanitize_meta_text(str(meta.get("description") or "").strip()), 160
        )
        result = {"title": title, "description": description}
        try:
            (self.debug_dir / "seo_meta.json").write_text(json.dumps(result, ensure_ascii=False, indent=2), encoding="utf-8")
        except Exception:
            pass
        return result

    @staticmethod
    def _sanitize_meta_text(text: str) -> str:
        """Remove or replace disallowed words and URL handle mentions."""
        if not text:
            return text
        repl = (
            (r"\b[Dd]iscover\b", "Learn"),
            (r"\b[Ee]xplore\b", "Learn"),
            (r"url\s*handle", ""),
            (r"URL\s*handle", ""),
        )
        import re
        out = text
        for pat, sub in repl:
            out = re.sub(pat, sub, out)
        # Trim extra spaces introduced by removals
        return " ".join(out.split())

    @staticmethod
    def _limit_at_word_boundary(text: str, max_len: int) -> str:
        if not text or len(text) <= max_len:
            return text
        # Cut and then backtrack to the last space to avoid breaking words
        cut = text[: max_len + 1]
        if " " in cut:
            cut = cut.rsplit(" ", 1)[0]
        return cut.rstrip(" -|,:;?")

    # ----------------------------
    # Step 9: References from links.txt
    # ----------------------------
    def build_references_from_links_file(self, max_count: int = 10) -> List[str]:
        """Read links.txt and select up to max_count authoritative URLs."""
        try:
            links_path = Path(self.cfg.links_file)
            if not links_path.exists():
                return []
            raw = links_path.read_text(encoding="utf-8", errors="ignore").splitlines()
            links = [ln.strip() for ln in raw if ln.strip() and (ln.startswith("http://") or ln.startswith("https://"))]
            if not links:
                return []
        except Exception:
            return []

        # Filter + score
        from urllib.parse import urlparse
        blocked_roots = {
            "reddit.com", "instagram.com", "youtube.com", "youtu.be", "pinterest.com",
            "tiktok.com", "facebook.com", "twitter.com", "x.com", "medium.com", "quora.com",
        }
        authority_bonus = {
            # Gemological & standards bodies
            "gia.edu": 8,
            "4cs.gia.edu": 8,
            "igi.org": 6,
            "hrdantwerp.com": 5,
            "ags.org": 5,
            # Encyclopedic / reputable info
            "britannica.com": 4,
            "wikipedia.org": 2,  # fallback if nothing else
            # Major auction houses / institutions
            "christies.com": 3,
            "sothebys.com": 3,
            # Government / education generic handled by TLD below
        }

        def root_domain(netloc: str) -> str:
            parts = netloc.lower().split('.')
            if len(parts) >= 2:
                return ".".join(parts[-2:])
            return netloc.lower()

        def tld_score(netloc: str) -> int:
            d = netloc.lower()
            if d.endswith('.gov') or d.endswith('.gov.uk'):
                return 5
            if d.endswith('.edu') or d.endswith('.ac.uk'):
                return 5
            if d.endswith('.org'):
                return 3
            if d.endswith('.net'):
                return 1
            return 0

        scored = []
        for u in links:
            try:
                p = urlparse(u)
                netloc = (p.netloc or '').lower()
                if not netloc:
                    continue
                rd = root_domain(netloc)
                # Block certain roots
                if rd in blocked_roots or netloc in blocked_roots:
                    continue
                score = 0
                score += tld_score(netloc)
                # Known authority bonuses for either full host or root
                score += authority_bonus.get(netloc, 0)
                score += authority_bonus.get(rd, 0)
                # Penalize obvious tracking/query noise
                if 'utm_' in u or 'fbclid=' in u:
                    score -= 1
                # Prefer shorter, canonical paths slightly
                score -= min(len(p.path.split('/')), 6) * 0.1
                scored.append((score, rd, u))
            except Exception:
                continue

        # Sort by score desc, then by root domain to diversify
        scored.sort(key=lambda x: (-x[0], x[1], x[2]))
        picked: List[str] = []
        seen_roots = set()
        for score, rd, u in scored:
            if rd in seen_roots and sum(1 for _, r2, _ in scored if r2 == rd) > 1:
                # Allow at most one per root by default to diversify
                continue
            seen_roots.add(rd)
            picked.append(u)
            if len(picked) >= max_count:
                break

        # Fallback: if empty, take up to max_count first valid links
        if not picked:
            picked = links[:max_count]

        try:
            (self.debug_dir / "refs_selected.json").write_text(json.dumps({"picked": picked}, ensure_ascii=False, indent=2), encoding="utf-8")
        except Exception:
            pass
        return picked

    @staticmethod
    def append_references_section(md_text: str, refs: List[str]) -> str:
        if not refs:
            return md_text
        # If a References section already exists, just append links at the end of doc
        import re
        refs_block = ["## References", ""]
        for i, u in enumerate(refs, 1):
            # Wrap URL in angle brackets to ensure clickable links in Markdown/Pandoc
            refs_block.append(f"{i}. <{u}>")
        refs_block.append("")
        block = "\n".join(refs_block)
        if re.search(r"(?mi)^## +References\s*$", md_text):
            return md_text.rstrip() + "\n\n" + "\n".join(f"{i}. <{u}>" for i, u in enumerate(refs, 1)) + "\n"
        return md_text.rstrip() + "\n\n" + block

    # ----------------------------
    # Step 0: Search links (optional)
    # ----------------------------
    def search_links(self):
        """Collect candidate source links for cfg.query into cfg.links_file.

        Generates search keywords from the query via Gemini
        (use_gemini_search), queries Serper for each keyword, filters social
        and video domains, de-duplicates while preserving order, truncates to
        cfg.max_links when set, and writes the links plus debug artifacts
        into the run directory.

        Raises:
            RuntimeError: if API keys are missing, the helper module cannot
                be imported, keyword generation fails, or zero links remain.
        """
        if not self.cfg.query:
            return
        if not self.cfg.aihubmix_api_key:
            raise RuntimeError("AIHUBMIX_API_KEY not set (or --aihubmix-api-key provided).")
        if not self.cfg.serper_api_key:
            raise RuntimeError("SERPER_API_KEY not set (or --serper-api-key provided).")

        info("Collecting links from query via Gemini+Serper...")
        try:
            from use_gemini_search import generate_topic_and_keywords, search_with_serper  # type: ignore
        except Exception as e:
            raise RuntimeError(f"Failed to import use_gemini_search.py: {e}")
        result = generate_topic_and_keywords(self.cfg.query, self.cfg.aihubmix_api_key)
        if not result:
            raise RuntimeError("Failed to generate keywords from query.")

        # Try to locate the keywords list key without relying on specific locale
        # strings (matches 'keyword*' in English or the Chinese term, escaped).
        kw_key = None
        try:
            for k in list(result.keys()):
                if isinstance(k, str):
                    low = k.lower()
                    if ("keyword" in low) or ("\u5173\u952e" in k) or ("\u5173\u952e\u8bcd" in k):
                        kw_key = k
                        break
        except Exception:
            pass
        if not kw_key:
            raise RuntimeError("No keywords field in model result.")
        keywords = result[kw_key]
        seen = set()
        links: List[str] = []
        for i, kw in enumerate(keywords, 1):
            info(f"Search [{i}/{len(keywords)}]: {kw}")
            try:
                found = search_with_serper(kw, self.cfg.serper_api_key)
            except Exception as e:
                warn(f"Search error for '{kw}': {e}")
                found = []
            # Filter blocked domains and common social/video sites
            try:
                from urllib.parse import urlparse
                # Fixed typo: was "quroa.com", which never matched quora.com
                # (the spelling used by build_references_from_links_file).
                roots = ("reddit.com", "instagram.com", "youtube.com", "youtu.be", "pinterest.com", "quora.com")
                filtered = []
                for u in found:
                    if not isinstance(u, str) or not (u.startswith("http://") or u.startswith("https://")):
                        continue
                    try:
                        netloc = urlparse(u).netloc.lower()
                    except Exception:
                        continue
                    if any(netloc == r or netloc.endswith("." + r) for r in roots):
                        continue
                    filtered.append(u)
                found = filtered
            except Exception:
                pass
            # De-duplicate while preserving discovery order.
            for url in found:
                if isinstance(url, str) and (url.startswith("http://") or url.startswith("https://")):
                    if url not in seen:
                        seen.add(url)
                        links.append(url)
            # Be polite to the search API between keywords.
            time.sleep(max(0.0, self.cfg.search_delay))

        if self.cfg.max_links and len(links) > self.cfg.max_links:
            links = links[: self.cfg.max_links]

        # Save under run directory
        ensure_dir(self.base_dir)
        Path(self.cfg.links_file).write_text("\n".join(links) + "\n", encoding="utf-8")
        (self.debug_dir / "search_topic_keywords.json").write_text(json.dumps(result, ensure_ascii=False, indent=2), encoding="utf-8")
        (self.debug_dir / "search_links.json").write_text(json.dumps(links, ensure_ascii=False, indent=2), encoding="utf-8")
        info(f"Collected {len(links)} unique links -> {self.cfg.links_file}")
        if not links:
            raise RuntimeError("Search returned 0 links; please refine --query or check API keys.")

    # ----------------------------
    # Step 1: Crawl
    # ----------------------------
    def crawl_articles(self):
        """Step 1: run the crawler script in a subprocess over the links file.

        Skipped when ``cfg.skip_crawl`` is set or markdown articles already
        exist. stdout/stderr are always persisted to the debug dir; a non-zero
        exit echoes the log tails on the console and raises RuntimeError.
        """
        if self.cfg.skip_crawl:
            info("skip_crawl=True; skip crawling step.")
            return

        # Re-use previously crawled output instead of hitting the network again.
        existing = list(Path(self.cfg.articles_dir).glob("*.md"))
        if existing:
            info(f"Found {len(existing)} existing articles in '{self.cfg.articles_dir}', skipping crawl.")
            return

        # Assemble: python crwaler_links.py -i links.txt -o articles [options...]
        interpreter = os.environ.get("PYTHON", "python")
        cmd: List[str] = [
            interpreter, self.cfg.crawler_script,
            "-i", self.cfg.links_file,
            "-o", self.cfg.articles_dir,
            "-b", self.cfg.browser,
        ]
        # Extended timing options.
        if self.cfg.crawler_timeout:
            cmd.extend(["-t", str(self.cfg.crawler_timeout)])
        if self.cfg.crawler_settle is not None:
            cmd.extend(["--settle", str(self.cfg.crawler_settle)])
        # Playwright crawler options (optional attributes on older configs).
        if getattr(self.cfg, 'crawler_page_load_strategy', None):
            cmd.extend(["--page-load-strategy", self.cfg.crawler_page_load_strategy])
        if getattr(self.cfg, 'crawler_user_data_dir', None):
            cmd.extend(["--user-data-dir", self.cfg.crawler_user_data_dir])
        if getattr(self.cfg, 'crawler_workers', None):
            try:
                workers = int(self.cfg.crawler_workers)
            except Exception:
                workers = 0
            if workers > 0:
                cmd.extend(["-w", str(workers)])
        if getattr(self.cfg, 'crawler_headless', False):
            cmd.append("--headless")
        if getattr(self.cfg, 'crawler_raw_html_out', None):
            cmd.extend(["--raw-html-out", self.cfg.crawler_raw_html_out])
        if self.cfg.start is not None:
            cmd.extend(["--start", str(self.cfg.start)])
        if self.cfg.end is not None:
            cmd.extend(["--end", str(self.cfg.end)])

        # Always capture and persist output, even when the crawler fails.
        proc = subprocess.run(cmd, text=True, capture_output=True, encoding="utf-8", errors="replace")
        (self.debug_dir / "crawler.stdout.log").write_text(proc.stdout or "", encoding="utf-8", errors="ignore")
        (self.debug_dir / "crawler.stderr.log").write_text(proc.stderr or "", encoding="utf-8", errors="ignore")
        if proc.returncode == 0:
            return

        # Surface a concise snippet of each stream for quick diagnosis.
        error("Crawler exited with non-zero status.")
        tails = (
            ("Last stderr lines:", (proc.stderr or "").strip().splitlines()[-10:]),
            ("Last stdout lines:", (proc.stdout or "").strip().splitlines()[-10:]),
        )
        for label, tail in tails:
            if tail:
                error(label)
                for ln in tail:
                    error(ln)
        error(f"See logs -> stdout: {self.debug_dir / 'crawler.stdout.log'} | stderr: {self.debug_dir / 'crawler.stderr.log'}")
        raise RuntimeError("Crawler failed; see logs for details.")

    # ----------------------------
    # Step 2: Load corpus
    # ----------------------------
    def load_corpus(self, limit_each_chars: int = 15000) -> List[Tuple[Path, str]]:
        """Step 2: read every crawled markdown article, truncated per file.

        Returns ``(path, text)`` pairs in sorted filename order; raises
        RuntimeError when the articles directory holds no ``*.md`` files.
        """
        folder = Path(self.cfg.articles_dir)
        files = sorted(folder.glob("*.md"))
        if not files:
            raise RuntimeError(f"No markdown files found in '{folder}'. Did the crawl succeed?")
        corpus: List[Tuple[Path, str]] = [
            (fp, read_text(fp, limit_chars=limit_each_chars)) for fp in files
        ]
        info(f"Loaded {len(corpus)} article(s) from '{folder}'.")
        return corpus

    # ----------------------------
    # Step 3: Summarize sources with Codex
    # ----------------------------
    def summarize_sources(self, corpus: List[Tuple[Path, str]]) -> List[Dict]:
        """Step 3: summarize each article with Codex into bullet-note JSON.

        Deduplicates the corpus by path, reuses cached ``summary_NN.json``
        files from the debug dir, runs the remaining jobs concurrently
        (``cfg.summary_workers`` threads, default 3), and returns one dict per
        unique input, in input order. Failed items degrade to an
        empty-bullets placeholder instead of raising.
        """
        print("开始总结文章...\n")
        # Deduplicate by path while keeping each item's original 1-based index.
        seen = set()
        ordered: List[Tuple[int, Path, str]] = []
        for i, (path, content) in enumerate(corpus, start=1):
            p = path.as_posix()
            if p in seen:
                continue
            seen.add(p)
            ordered.append((i, path, content))

        results: Dict[int, Dict] = {}
        to_run: List[Tuple[int, Path, str]] = []
        # Reuse existing summaries if present (makes re-runs idempotent).
        for idx, path, content in ordered:
            sp = self.debug_dir / f"summary_{idx:02d}.json"
            try:
                if sp.exists():
                    js = json.loads(sp.read_text(encoding="utf-8", errors="ignore"))
                    if isinstance(js, dict) and js:
                        results[idx] = js
                        continue
            except Exception:
                pass
            to_run.append((idx, path, content))

        def _do_job(idx: int, path: Path, content: str) -> Tuple[int, Dict]:
            # One summarization job: up to 3 attempts, then a placeholder.
            # BUGFIX: the prompt previously asked for JSON fields and then,
            # mid-sentence, for "the final Markdown article" (a bad paste);
            # it now consistently requests a single JSON object.
            prompt = textwrap.dedent(f"""
            Summarize the following article into structured bullet notes for a research brief.
            Keep to 8-14 bullets. Include: key facts, definitions, recommendations, and any stats.
            Output JSON with fields: source, title_guess, bullets.
            Output only the JSON object; do not include any tool banners, timestamps, environment metadata, or debug lines.
            <source path="{path.as_posix()}">
            {content}
            </source>
            """).strip()
            last_err = None
            for attempt in range(1, 4):
                try:
                    out = run_codex_exec(prompt, codex_bin=self.cfg.codex_bin)
                    parsed = self._extract_json(out)
                    if not parsed:
                        # Model did not return JSON: keep its raw lines as bullets.
                        parsed = {
                            "source": path.as_posix(),
                            "title_guess": None,
                            "bullets": [line.strip("- ") for line in out.splitlines() if line.strip()],
                        }
                    (self.debug_dir / f"summary_{idx:02d}.json").write_text(json.dumps(parsed, ensure_ascii=False, indent=2), encoding="utf-8")
                    info(f"Created summary {idx:02d} -> {path.name}")
                    return idx, parsed
                except Exception as e:
                    last_err = e
                    warn(f"Summarize failed idx={idx} attempt={attempt}: {e}")
                    # Linear backoff, capped at 5 seconds.
                    time.sleep(min(2*attempt, 5))
            # Fallback after exhausting retries: persist an empty placeholder.
            fb = {"source": path.as_posix(), "title_guess": None, "bullets": []}
            try:
                (self.debug_dir / f"summary_{idx:02d}.json").write_text(json.dumps(fb, ensure_ascii=False, indent=2), encoding="utf-8")
            except Exception:
                pass
            return idx, fb

        # Concurrency: clamp the configured worker count to a minimum of 1.
        from concurrent.futures import ThreadPoolExecutor, as_completed
        try:
            max_workers = max(1, int(getattr(self.cfg, 'summary_workers', 3) or 3))
        except Exception:
            max_workers = 3

        futures = []
        with ThreadPoolExecutor(max_workers=max_workers) as ex:
            for idx, path, content in to_run:
                info(f"[Summary] Submit idx={idx} file={path.name}")
                futures.append(ex.submit(_do_job, idx, path, content))
            for fut in as_completed(futures):
                try:
                    idx, data = fut.result()
                    results[idx] = data
                except Exception as e:
                    warn(f"Summary future error: {e}")

        # Build the ordered output; any missing index gets a placeholder.
        summaries: List[Dict] = []
        for idx, path, _ in ordered:
            if idx in results:
                summaries.append(results[idx])
            else:
                summaries.append({"source": path.as_posix(), "title_guess": None, "bullets": []})
        info(f"Created {len(summaries)} summaries.")
        return summaries
    # ----------------------------
    # Step 4: Draft blog with Codex
    # ----------------------------
    def draft_blog(self, theme: str, summaries: List[Dict], eeat_profile: Optional[str]) -> str:
        """Step 4: ask Codex to write the full blog article from summary notes.

        ``theme`` is interpolated into the prompt; ``eeat_profile`` (optional)
        supplies an author profile for EEAT framing. The raw draft is also
        persisted to ``debug/draft_raw.md`` and returned.
        """
        summaries_json = json.dumps(summaries, ensure_ascii=False)
        profile_text = eeat_profile or ""
        # BUGFIX: the "no lists" and "tables allowed" requirements were fused
        # onto a single line (a missing newline); they are separate bullets now.
        prompt = textwrap.dedent(f"""
        You are an expert editorial writer for a jewelry blog.
        Using the following research notes (JSON), write a comprehensive English blog article on {theme}.
        Requirements:
        - Clear structure with H2/H3 headings , article title should be H1 
        - High EEAT: demonstrate first-hand experience, cite reputable knowledge briefly
        - Practical advice, definitions, pros/cons, care and buying tips
        - Neutral, informative tone; avoid fluff; 1500-3000 words
        - Do not use bullet or numbered lists; write in natural paragraphs. Convert any enumerations into coherent sentences with transitions. Before final output, ensure there are no lines starting with '-' or '*' or a digit followed by '.' or ')'; rewrite any such lists into paragraphs.
        - You may use concise Markdown tables to present data or comparisons (with clear headers).
        - Include a concise intro and a takeaway section
        - Add a short FAQ (4-6 Q&A)
        - Output only the final Markdown article; do not include any tool banners, timestamps, environment metadata, or debug lines. Start directly with the article heading.
        Do not fabricate facts beyond the notes; synthesize them clearly.

        If provided, adopt this author profile for EEAT:
        {profile_text}

        Research notes (JSON):
        {summaries_json}
        """).strip()
        draft = run_codex_exec(prompt, codex_bin=self.cfg.codex_bin)
        (self.debug_dir / "draft_raw.md").write_text(draft, encoding="utf-8")
        return draft

    def draft_blog_from_sources(self, theme: str, corpus: List[Tuple[Path, str]], eeat_profile: Optional[str]) -> str:
        """
        Compose the blog directly from full source articles (skip summarization).

        Sources are passed to the model as ``<source path="...">`` blocks; the
        raw draft is persisted to ``debug/draft_raw.md`` and returned.
        """
        profile_text = eeat_profile or ""
        # Build delimited sources
        src_blocks = []
        for path, content in corpus:
            src_blocks.append(f"<source path=\"{path.as_posix()}\">\n{content}\n</source>")
        sources_joined = "\n\n".join(src_blocks)
        prompt = textwrap.dedent(f"""
        You are an expert editorial writer for a jewelry blog.
        Using the provided source articles (as <source> blocks), write a comprehensive English blog article on {theme}.
        Requirements:
        - Clear structure with H2/H3 headings
        - High EEAT: demonstrate first-hand experience; ground claims in the sources
        - Practical advice, definitions, pros/cons, care and buying tips
        - Neutral, informative tone; avoid fluff; 1500-3000 words
        - Do not use bullet or numbered lists; write in natural paragraphs. Convert any enumerations into coherent sentences with transitions.
        - You may use concise Markdown tables to present data or comparisons (with clear headers).
        - Include a concise intro and a takeaway section
        - Add a short FAQ (4-6 Q&A)
        Do not fabricate facts beyond the sources; synthesize clearly and attribute concepts when needed.

        If provided, adopt this author profile for EEAT:
        {profile_text}

        Source articles:
        {sources_joined}
        """).strip()  # CONSISTENCY FIX: strip the prompt like every other prompt builder in this class.
        draft = run_codex_exec(prompt, codex_bin=self.cfg.codex_bin)
        (self.debug_dir / "draft_raw.md").write_text(draft, encoding="utf-8")
        return draft

    # ----------------------------
    # Step 5: Insert anchors with Codex
    # ----------------------------
    def insert_anchors(self, draft_md: str, anchors: Dict[str, str]) -> str:
        """Step 5: have Codex weave the keyword->URL anchors into the draft.

        Persists the anchored result to ``debug/draft_anchored.md`` and
        returns it.
        """
        mapping_json = json.dumps(anchors, ensure_ascii=False, indent=2)
        prompt = textwrap.dedent(f"""
        Insert anchor links into the following Markdown draft.
        - Use the mapping from keyword/phrase to URL (JSON below)
        - Link each keyword at most once per major section
        - Keep anchor text natural; do not overlink; avoid keyword stuffing
        - Preserve headings and formatting

        Mapping JSON:
        {mapping_json}

        Draft Markdown:
        {draft_md}
        """).strip()
        result = run_codex_exec(prompt, codex_bin=self.cfg.codex_bin)
        (self.debug_dir / "draft_anchored.md").write_text(result, encoding="utf-8")
        return result

    # ----------------------------
    # Step 6: Image plan with Codex
    # ----------------------------
    def image_plan(self, draft_md: str, count: int) -> List[Dict]:
        """Step 6: ask Codex to propose ``count`` images for the article.

        Each proposal is a dict with filename/prompt/alt/caption/
        insert_after_heading; the parsed plan is also persisted to
        ``debug/image_plan.json``.
        """
        prompt = textwrap.dedent(f"""
        Read the article below and propose {count} images to illustrate it.
        For each image, output JSON with fields: filename, prompt, alt, caption, insert_after_heading.
        - filename: descriptive kebab-case, no spaces, ends with .png
        - prompt: detailed photorealistic description suitable for a jewelry product/lifestyle shot
        - alt: succinct and descriptive for accessibility
        - caption: 1 sentence max
        - insert_after_heading: the H2/H3 heading text after which to place the image

        Article Markdown:
        {draft_md}
        """).strip()
        raw = run_codex_exec(prompt, codex_bin=self.cfg.codex_bin)
        items = self._extract_json_list(raw)
        (self.debug_dir / "image_plan.json").write_text(json.dumps(items, ensure_ascii=False, indent=2), encoding="utf-8")
        return items

    # ----------------------------
    # Step 7: Generate images with Gemini (optional)
    # ----------------------------
    def generate_images_with_gemini(self, plan: List[Dict]) -> List[str]:
        """Step 7 (optional): best-effort image generation via Gemini/Imagen.

        Returns the list of saved image file paths. Degrades to an empty list
        (with warnings) when the API key or SDK is missing or generation fails.
        NOTE(review): the byte-extraction below probes several attribute shapes
        because the image API surface varies by SDK version — confirm against
        the installed google-generativeai release.
        """
        saved: List[str] = []
        if not plan:
            return saved
        api_key = self.cfg.gemini_api_key
        if not api_key:
            warn("GEMINI_API_KEY not set; skipping image generation.")
            return saved

        # Lazy import keeps the dependency optional.
        try:
            import google.generativeai as genai  # type: ignore
        except Exception as e:
            warn(f"google-generativeai not installed ({e}); skipping images.")
            return saved

        # Image generation API naming may evolve; this block is best-effort.
        try:
            genai.configure(api_key=api_key)
            # Imagen model names vary by region/version; try a couple.
            model_candidates = [
                "imagen-3.0-generate",
                "imagen-3.0",
            ]
            model = None
            for name in model_candidates:
                try:
                    model = genai.GenerativeModel(name)
                    break
                except Exception:
                    continue
            if model is None:
                warn("No suitable Gemini/Imagen model available; skipping images.")
                return saved

            # Attempt generation per plan item.
            for item in plan:
                prompt = item.get("prompt") or ""
                filename = item.get("filename") or f"image_{len(saved)+1}.png"
                target = (self.images_dir / filename).as_posix()
                try:
                    img_bytes: Optional[bytes] = None
                    # Attempt 1: a direct Images API, if this SDK exposes one.
                    try:
                        images_api = getattr(genai, "Images", None)
                        if images_api:
                            res = images_api.generate(
                                prompt=prompt,
                                size=self.cfg.image_size,
                                number_of_images=1,
                            )
                            # Expect first image's raw bytes.
                            if hasattr(res, "images") and res.images:
                                first = res.images[0]
                                if hasattr(first, "image"):
                                    img_bytes = first.image
                                elif hasattr(first, "as_bytes"):
                                    img_bytes = first.as_bytes()
                    except Exception:
                        pass

                    # Attempt 2: fall back to generate_content (if supported).
                    if img_bytes is None:
                        resp = model.generate_content([
                            {"text": f"Generate a {self.cfg.image_size} PNG image. Subject: {prompt}"}
                        ])
                        # Response shapes vary; probe common byte-holding attrs.
                        if hasattr(resp, "_result") and hasattr(resp._result, "media"):
                            media = resp._result.media
                            if media and isinstance(media, list):
                                blob = media[0]
                                for attr in ("data", "bytes", "content"):
                                    if hasattr(blob, attr):
                                        img_bytes = getattr(blob, attr)
                                        break

                    if not img_bytes:
                        # BUGFIX: these messages hard-coded "(unknown)" inside an
                        # f-string with no placeholder; report the actual filename.
                        warn(f"Failed to extract image bytes for '{filename}'; skipping.")
                        continue

                    with open(target, "wb") as f:
                        f.write(img_bytes)
                    saved.append(target)
                except Exception as e:
                    warn(f"Image generation failed for '{filename}': {e}")

        except Exception as e:
            warn(f"Gemini generation error: {e}")
        return saved

    # ----------------------------
    # Step 8: Merge images into Markdown
    # ----------------------------
    def merge_images_into_markdown(self, draft_md: str, plan: List[Dict]) -> str:
        """Step 8: splice planned image blocks into the draft Markdown.

        Each plan item is inserted right after the first H2/H3 whose text
        matches its ``insert_after_heading`` (case-insensitive); any image
        that never matched a heading is appended at the end of the document.
        """
        if not plan:
            return draft_md

        def _image_block(item: Dict) -> Tuple[str, str]:
            # Render one plan entry to (filename, markdown image block).
            alt = item.get("alt") or ""
            caption = item.get("caption") or ""
            fn = item.get("filename") or "image.png"
            rel_path = Path("images") / fn
            return fn, f"\n![{alt}]({rel_path.as_posix()})\n\n*{caption}*\n"

        placed = {p.get("filename"): False for p in plan}
        rendered: List[str] = []
        for line in draft_md.splitlines():
            rendered.append(line)
            if not (line.startswith("##") or line.startswith("###")):
                continue
            heading = line.lstrip("# ").strip().lower()
            for item in plan:
                if placed.get(item.get("filename")):
                    continue
                target = item.get("insert_after_heading")
                if not target:
                    continue
                if heading == str(target).strip().lower():
                    fn, block = _image_block(item)
                    rendered.append(block)
                    placed[fn] = True
        # Anything not matched to a heading goes at the very end.
        for item in plan:
            fn = item.get("filename")
            if fn and not placed.get(fn):
                _, block = _image_block(item)
                rendered.append(block)
                placed[fn] = True
        return "\n".join(rendered)

    # ----------------------------
    # Utilities
    # ----------------------------
    @staticmethod
    def _extract_json(text_out: str) -> Optional[Dict]:
        # Try to find the first JSON object in the output
        import re
        m = re.search(r"\{[\s\S]*\}", text_out)
        if not m:
            return None
        blob = m.group(0)
        try:
            return json.loads(blob)
        except Exception:
            return None

    @staticmethod
    def _extract_json_list(text_out: str) -> List[Dict]:
        # Try a JSON array first, then fallback to lines of JSON
        try:
            data = json.loads(text_out)
            if isinstance(data, list):
                return [d for d in data if isinstance(d, dict)]
        except Exception:
            pass
        items: List[Dict] = []
        for line in text_out.splitlines():
            line = line.strip()
            if not line:
                continue
            try:
                obj = json.loads(line)
                if isinstance(obj, dict):
                    items.append(obj)
            except Exception:
                continue
        return items

    @staticmethod
    def _hash_text(text: str, algo: str = "sha1", length: int = 12) -> str:
        norm = (text or "").strip().lower()
        if algo.lower() in ("sha1", "sha-1"):
            h = hashlib.sha1(norm.encode("utf-8")).hexdigest()
        elif algo.lower() in ("sha256", "sha-256"):
            h = hashlib.sha256(norm.encode("utf-8")).hexdigest()
        elif algo.lower() in ("md5",):
            h = hashlib.md5(norm.encode("utf-8")).hexdigest()
        else:
            h = hashlib.sha1(norm.encode("utf-8")).hexdigest()
        if length and length > 0:
            return h[:length]
        return h

    @staticmethod
    def _strip_trailing_tokens_used(md_text: str) -> str:
        import re
        lines = (md_text or "").splitlines()
        # drop trailing blanks
        while lines and lines[-1].strip() == "":
            lines.pop()
        # drop one trailing 'tokens used:' line (case-insensitive)
        if lines and re.search(r"tokens\s+used\s*:\s*\d+\s*$", lines[-1], flags=re.IGNORECASE):
            lines.pop()
        return "\n".join(lines) + ("\n" if lines else "")

    # ----------------------------
    # Orchestrate all steps
    # ----------------------------
    def run(self) -> Path:
        # Optional: run search to prepare run-scoped links file
        if self.cfg.query:
            self.search_links()
        self.crawl_articles()
        corpus = self.load_corpus(limit_each_chars=self.cfg.max_chars_per_source)
        draft_theme = self.cfg.theme or self.cfg.query or "fancy yellow diamonds"
        if self.cfg.compose_direct:
            draft = self.draft_blog_from_sources(draft_theme, corpus, self.cfg.eeat_profile)
        else:
            summaries = self.summarize_sources(corpus)
            draft = self.draft_blog(draft_theme, summaries, self.cfg.eeat_profile)

        # Anchors mapping (optional)
        anchored_md = draft
        anchors_map: Dict[str, str] = {}
        if self.cfg.anchors_file and Path(self.cfg.anchors_file).exists():
            anchors_map = json.loads(Path(self.cfg.anchors_file).read_text(encoding="utf-8"))
        if anchors_map:
            print("Found anchors mapping")
            anchored_md = self.insert_anchors(draft, anchors_map)
        else: 
            print("No anchors mapping found")
            
        # Images disabled
        final_md = anchored_md




        # SEO disabled
        
        
        
        
        
        # References: select authoritative links from links.txt and append to the article
        try:
            refs = self.build_references_from_links_file(max_count=10)
            if refs:
                final_md = self.append_references_section(final_md, refs)
        except Exception as e:
            warn(f"Failed to build/append references: {e}")

        # Save outputs
        ts = datetime.now().strftime('%Y%m%d_%H%M%S')
        final_path = self.out_dir / f"blog_{ts}.md"
        # Clean known noise before saving: drop preamble before first H1, then strip trailing token lines
        final_md = self._strip_preamble_before_first_h1(final_md)
        final_md = self._strip_trailing_tokens_used(final_md)
        final_path.write_text(final_md, encoding="utf-8")
        info(f"Final blog saved -> {final_path.as_posix()}")

        # Pre-image Codex review pass: clean tokens lines, remove Chinese, strip brands (except 'mvraki'), fix logic
        try:
            (self.debug_dir / "final_before_review.md").write_text(final_md, encoding="utf-8")
        except Exception:
            pass
        try:
            reviewed_md = self.review_article_with_codex(final_md)
            if reviewed_md and reviewed_md.strip():
                # Re-apply H1 preamble strip in case the model echoed tool banners
                final_md = self._strip_preamble_before_first_h1(reviewed_md)
                final_path.write_text(final_md, encoding="utf-8")
                try:
                    (self.debug_dir / "final_after_review.md").write_text(final_md, encoding="utf-8")
                except Exception:
                    pass
                info("Applied Codex review before image generation")
        except Exception as e:
            warn(f"Pre-image review skipped due to error: {e}")

        # Final step: generate and insert images into the saved markdown
        try:
            # Prefer environment/CLI-provided key for AIHubMix
            api_key = (self.cfg.aihubmix_api_key or os.environ.get("AIHUBMIX_API_KEY"))
            if not api_key:
                raise RuntimeError("AIHUBMIX_API_KEY missing; skip image insertion")

            # 1) Generate image descriptions from the final article content
            try:
                from agent.generate_image_describe import generate_image_descriptions
                desc_list = generate_image_descriptions(str(final_path), api_key=api_key, model="gpt-4o-mini")
            except Exception as e:
                raise RuntimeError(f"generate_image_descriptions failed: {e}")

            if not desc_list:
                raise RuntimeError("No image descriptions produced")

            # 2) Generate images for those descriptions
            try:
                # from agent.generate_image import batch_generate_images
                from agent.doubao_image_postprocess import batch_generate_and_clean_images
                # 将去水印后的图片直接保存到当前 run 的 images 目录，并返回相对路径
                raw_dir = (self.debug_dir / 'doubao_raw').as_posix()
                clean_dir = self.images_dir.as_posix()
                prompt_image_pairs = batch_generate_and_clean_images(
                    desc_list,
                    raw_dir=raw_dir,
                    clean_dir=clean_dir,
                    as_relative=True,
                )
            except Exception as e:
                raise RuntimeError(f"batch_generate_images failed: {e}")

            if not prompt_image_pairs:
                raise RuntimeError("No images generated")

            # Normalize paths for Markdown on Windows
            try:
                for item in prompt_image_pairs:
                    if isinstance(item, dict) and isinstance(item.get("image_path"), str):
                        item["image_path"] = item["image_path"].replace("\\", "/")
            except Exception:
                pass

            # 3) Insert images back into the article
            try:
                from agent.insert_image_article import insert_images_into_article
                ok = insert_images_into_article(str(final_path), prompt_image_pairs)
                if ok:
                    info("Images inserted into final markdown")
                else:
                    warn("Image insertion returned False; article unchanged")
            except Exception as e:
                raise RuntimeError(f"insert_images_into_article failed: {e}")

        except Exception as e:
            warn(f"Skip image insertion: {e}")

        # After image insertion, strip any leading logs/noise before first H1 again
        try:
            try:
                md_txt_after = final_path.read_text(encoding='utf-8', errors='ignore')
            except Exception:
                md_txt_after = None
            if md_txt_after:
                cleaned_after = self._strip_preamble_before_first_h1(md_txt_after)
                cleaned_after = self._strip_trailing_tokens_used(cleaned_after)
                if cleaned_after != md_txt_after:
                    final_path.write_text(cleaned_after, encoding='utf-8')
                    info("Stripped preamble before H1 after image insertion")
        except Exception as e:
            warn(f"Post-image cleanup failed: {e}")

        # Copy referenced images into output/images and rewrite links
        try:
            self._rewrite_images_to_output(final_path)
        except Exception as e:
            warn(f"Image rewrite step failed: {e}")

        # Convert final markdown to DOCX
        docx_path: Optional[Path] = None
        try:
            docx_path = self._convert_markdown_to_docx(final_path)
        except Exception as e:
            warn(f"DOCX conversion error: {e}")

        # Optional: send via WeCom (prefer DOCX if available)
        try:
            if getattr(self.cfg, 'wecom_send', False):
                webhook = self.cfg.wecom_webhook or os.environ.get('WECOM_WEBHOOK')
                if not webhook:
                    warn('WECOM webhook not provided; skip sending')
                else:
                    to_send = docx_path if (docx_path and docx_path.exists()) else final_path
                    try:
                        # Derive display name from H1 if possible
                        title = None
                        try:
                            md_txt = final_path.read_text(encoding='utf-8', errors='ignore')
                            lines = md_txt.splitlines()
                            for raw in lines:
                                s = raw.lstrip('\ufeff').lstrip()
                                if s.startswith('# ') and not s.startswith('##'):
                                    title = s[1:].strip()
                                    break
                        except Exception:
                            pass
                        disp = (title or to_send.stem) + to_send.suffix
                        from wecom_file_sender import WeComFileSender  # type: ignore
                        sender = WeComFileSender(webhook=webhook, timeout=15, disable_proxy=True)
                        sender.send_file(str(to_send), display_name=disp)
                        info(f"WeCom sent -> {to_send.as_posix()}")
                    except Exception as e2:
                        warn(f"WeCom send failed: {e2}")
        except Exception:
            pass

        # # Split the final markdown into two parts: steps (lines before first H1) and content (from H1 onward)
        # try:
        #     steps_md, content_md = self.split_markdown_at_first_h1(final_md)
        #     steps_path = self.out_dir / f"steps_{ts}.md"
        #     content_path = self.out_dir / f"content_{ts}.md"
        #     steps_path.write_text(steps_md, encoding="utf-8")
        #     content_path.write_text(content_md, encoding="utf-8")
        #     info(f"Split output -> steps: {steps_path.as_posix()} | content: {content_path.as_posix()}")
        # except Exception as e:
        #     warn(f"Failed to split markdown at H1: {e}")
        #     steps_path = None
        #     content_path = None

        # # Optional: prompt user to inject a single anchor text + URL into the article (content part)
        # try:
        #     if content_path and content_path.exists():
        #         insert_anchor_cli_if_provided(self,
        #             content_path,
        #             anchor_text_cli=self.cfg.anchor_text,
        #             anchor_url_cli=self.cfg.anchor_url,
        #         )

        # except Exception as e:
        #     warn(f"Anchor injection step skipped due to error: {e}")

        #TODO 插入图片
        return final_path

    @staticmethod
    def split_markdown_at_first_h1(md_text: str) -> Tuple[str, str]:
        """
        Split the markdown into two parts at the first H1 heading (a line starting with '# ').

        Returns a tuple (steps_md, content_md):
        - steps_md: all lines BEFORE the first H1 (may be empty)
        - content_md: from the first H1 line THROUGH the end (or entire text if no H1)

        If no H1 is found, steps_md is empty and content_md is the original text.
        """
        lines = md_text.splitlines()
        h1_index: Optional[int] = None
        for idx, line in enumerate(lines):
            # Consider a strict H1 as a line that starts with single '# ' (common in our outputs)
            # Also accept leading BOM/whitespace before '#'
            s = line.lstrip("\ufeff ")
            if s.startswith('# '):
                h1_index = idx
                break
        if h1_index is None:
            return ("", md_text)
        steps_md = "\n".join(lines[:h1_index]).rstrip() + ("\n" if h1_index > 0 else "")
        content_md = "\n".join(lines[h1_index:]).rstrip() + "\n"
        return (steps_md, content_md)

    def prompt_and_insert_anchor_into_content(self, content_path: Path, anchor_text_cli: Optional[str] = None, anchor_url_cli: Optional[str] = None) -> None:
        """
        Insert a single anchor (text + URL) into the split content file.

        Anchor values come from the CLI when both are provided; otherwise the
        user is prompted twice on stdin (anchor text, then URL). Blank input
        or EOF at either prompt skips the step gracefully. The URL must start
        with http:// or https://. On success the content file is rewritten in
        place and a best-effort debug copy is saved under ``self.debug_dir``.

        Args:
            content_path: Markdown file to update in place.
            anchor_text_cli: Optional anchor text supplied via CLI flags.
            anchor_url_cli: Optional anchor URL supplied via CLI flags.
        """
        # Prefer CLI-provided anchor text and URL over interactive input.
        if anchor_text_cli and anchor_url_cli:
            anchor_text = anchor_text_cli.strip()
            anchor_url = anchor_url_cli.strip()
            use_cli = True
        else:
            use_cli = False
            # Interactive prompt fallback.
            try:
                print("Enter anchor text (leave blank to skip): ", end="", flush=True)
                anchor_text = input().strip()
            except EOFError:
                # Non-interactive environment; skip gracefully.
                return
            if not anchor_text:
                info("Anchor text empty; skip anchor injection")
                return

            print("Enter URL for the anchor (leave blank to skip): ", end="", flush=True)
            try:
                anchor_url = input().strip()
            except EOFError:
                return
            if not anchor_url:
                info("URL empty; skip anchor injection")
                return

        # Basic sanity check on the URL scheme; message names the value source.
        if not anchor_url.startswith(("http://", "https://")):
            source = "CLI" if use_cli else "Input"
            warn(f"{source} URL invalid (must start with http:// or https://); skip injection")
            return

        draft_md = content_path.read_text(encoding="utf-8", errors="ignore")
        mapping = {anchor_text: anchor_url}
        try:
            anchored_md = self.insert_anchors(draft_md, mapping)
        except Exception as e:
            # FIX: original log message was mojibake-corrupted (GBK-decoded
            # UTF-8 Chinese); replaced with readable English, matching the
            # logging style of insert_anchor_cli_if_provided.
            warn(f"Codex anchor injection failed: {e}")
            return

        # Save updated content in place, plus a best-effort debug copy.
        content_path.write_text(anchored_md, encoding="utf-8")
        dbg = self.debug_dir / (content_path.stem + "_anchored.md")
        try:
            dbg.write_text(anchored_md, encoding="utf-8")
        except Exception:
            pass
        # FIX: this message was also mojibake-corrupted in the original.
        info(f"Anchor injected into content -> {content_path.as_posix()}")


# Non-interactive single-anchor insertion helper (module-level)
from typing import Optional
from pathlib import Path


def insert_anchor_cli_if_provided(pipeline, content_path: Path, anchor_text_cli: Optional[str] = None, anchor_url_cli: Optional[str] = None) -> None:
    """
    Inject a single anchor into the content file without any prompting.

    Acts only when BOTH CLI values are present, non-blank after stripping,
    and the URL carries an http(s) scheme; otherwise logs a note and returns.
    Delegates the rewrite to ``pipeline.insert_anchors`` and writes the result
    back in place, plus a best-effort debug copy under ``pipeline.debug_dir``.
    """
    # Guard clauses: bail out quietly on any missing/invalid input.
    if not (anchor_text_cli and anchor_url_cli):
        info("No --anchor-text/--anchor-url; skip anchor injection.")
        return
    text = anchor_text_cli.strip()
    url = anchor_url_cli.strip()
    if not (text and url):
        info("Empty anchor text or URL; skip.")
        return
    if not url.startswith(("http://", "https://")):
        warn("Anchor URL must start with http:// or https://; skip.")
        return

    source_md = content_path.read_text(encoding="utf-8", errors="ignore")
    try:
        updated_md = pipeline.insert_anchors(source_md, {text: url})
    except Exception as e:
        warn(f"Anchor injection failed: {e}")
        return

    content_path.write_text(updated_md, encoding="utf-8")
    debug_copy = pipeline.debug_dir / (content_path.stem + "_anchored.md")
    try:
        debug_copy.write_text(updated_md, encoding="utf-8")
    except Exception:
        # Debug copy is best-effort only; never fail the main flow.
        pass
    info(f"Anchor injected into content -> {content_path.as_posix()}")

def parse_cli(argv: List[str]) -> PipelineConfig:
    """
    Parse command-line arguments and build a PipelineConfig.

    Also loads the optional env file (``--env-file``, default ``.env``) via
    python-dotenv when it is installed, so later ``os.environ`` lookups work.

    Args:
        argv: Argument list excluding the program name (e.g. ``sys.argv[1:]``).

    Returns:
        A fully populated PipelineConfig.
    """
    import argparse
    # FIX: description/help strings contained mojibake arrows ("鈫?"), i.e.
    # UTF-8 "→" mis-decoded as GBK; replaced with ASCII "->".
    p = argparse.ArgumentParser(description="Automate crawl -> synthesize -> anchor -> images pipeline with Codex + Gemini")
    p.add_argument("--env-file", default=".env", help="Path to environment file to load (optional)")
    p.add_argument("--links-file", default="./links.txt")
    p.add_argument("--crawler-script", default="crwaler_links.py")
    p.add_argument("--articles-dir", default="articles")
    p.add_argument("--articles-out", help="Override final directory to store crawled articles (absolute or relative)")
    p.add_argument("--output-dir", default="output")
    p.add_argument(
        "--browser",
        choices=["chromium", "firefox", "webkit", "chrome", "edge"],
        default="chromium",
    )
    p.add_argument("--skip-crawl", action="store_true")
    p.add_argument("--start", type=int)
    p.add_argument("--end", type=int)
    p.add_argument("--anchors-file", help="JSON mapping of anchor text -> URL")
    p.add_argument("--eeat-profile", help="Author bio/profile to reinforce EEAT")
    p.add_argument("--gemini-api-key", help="Override GEMINI_API_KEY env var")
    p.add_argument("--codex-bin", help="Path or name of codex CLI (e.g., codex, codex.cmd)")
    # Optional single-anchor injection into split content
    p.add_argument("--anchor-text", help="Anchor text to insert into the content (optional)")
    p.add_argument("--anchor-url", help="URL for the anchor text (optional)")
    # Crawler controls
    p.add_argument("--crawler-timeout", type=int, default=60, help="Per-page load timeout (seconds)")
    p.add_argument("--crawler-settle", type=float, default=1.0, help="Extra settle wait after load (seconds)")
    p.add_argument("--crawler-workers", type=int, default=1, help="Number of parallel browser workers")
    p.add_argument("--crawler-headless", action="store_true", help="Run crawler in headless mode")
    p.add_argument(
        "--crawler-page-load-strategy",
        choices=["normal", "eager", "none"],
        default="normal",
        help="Playwright page wait strategy",
    )
    p.add_argument("--crawler-user-data-dir", help="Root folder for browser profiles; per-worker subdirs will be created")
    p.add_argument("--crawler-raw-html-out", help="Also write raw HTML into this folder (optional)")
    # Search + hashing
    p.add_argument("--summary-workers", type=int, default=3, help="Concurrent workers for summarization")
    p.add_argument("--query", help="User query to search and derive links")
    p.add_argument("--theme", help="Topic/theme for the generated blog article")
    p.add_argument("--compose-direct", action="store_true", help="Compose directly from full source articles (skip summarization)")
    p.add_argument("--max-chars-per-source", type=int, default=15000, help="Max characters to read per source article")
    p.add_argument("--runs-root", default="runs", help="Root folder to store hashed runs")
    p.add_argument("--hash-algo", default="sha1", choices=["md5", "sha1", "sha256"])
    p.add_argument("--hash-len", type=int, default=12)
    p.add_argument("--aihubmix-api-key", help="Key for aihubmix.com LLM (env: AIHUBMIX_API_KEY)")
    p.add_argument("--serper-api-key", help="Key for serper.dev search (env: SERPER_API_KEY)")
    p.add_argument("--search-delay", type=float, default=1.0)
    p.add_argument("--max-links", type=int)
    # WeCom sending
    p.add_argument("--wecom-send", action="store_true", help="Send the final .docx (or .md fallback) via WeCom webhook")
    p.add_argument("--wecom-webhook", help="Full WeCom webhook URL (or set env WECOM_WEBHOOK)")
    args = p.parse_args(argv)

    # Load env file if available (so that os.environ lookups work below).
    try:
        from dotenv import load_dotenv  # type: ignore
        if args.env_file and os.path.exists(args.env_file):
            load_dotenv(args.env_file)
            info(f"Loaded environment from {args.env_file}")
    except Exception:
        # python-dotenv not installed or load failed; env loading is optional.
        pass
    return PipelineConfig(
        links_file=args.links_file,
        crawler_script=args.crawler_script,
        articles_dir=args.articles_dir,
        articles_out=args.articles_out,
        output_dir=args.output_dir,
        browser=args.browser,
        skip_crawl=args.skip_crawl,
        start=args.start,
        end=args.end,
        anchors_file=args.anchors_file,
        eeat_profile=args.eeat_profile,
        gemini_api_key=args.gemini_api_key,
        codex_bin=args.codex_bin,
        crawler_timeout=args.crawler_timeout,
        crawler_settle=args.crawler_settle,
        crawler_workers=args.crawler_workers,
        crawler_headless=args.crawler_headless,
        crawler_page_load_strategy=args.crawler_page_load_strategy,
        crawler_user_data_dir=args.crawler_user_data_dir,
        crawler_raw_html_out=args.crawler_raw_html_out,
        anchor_text=args.anchor_text,
        anchor_url=args.anchor_url,
        query=args.query,
        theme=args.theme,
        compose_direct=args.compose_direct,
        max_chars_per_source=args.max_chars_per_source,
        runs_root=args.runs_root,
        hash_algo=args.hash_algo,
        hash_len=args.hash_len,
        aihubmix_api_key=args.aihubmix_api_key,
        serper_api_key=args.serper_api_key,
        search_delay=args.search_delay,
        max_links=args.max_links,
        summary_workers=args.summary_workers,
        wecom_send=args.wecom_send,
        wecom_webhook=args.wecom_webhook,
    )


def main():
    """CLI entry point: parse arguments, build the pipeline, and run it."""
    config = parse_cli(sys.argv[1:])
    AutoBlogPipeline(config).run()


# Standard entry guard: run the pipeline only when executed as a script.
if __name__ == "__main__":
    main()
