import argparse
import hashlib
import os
import re
import sys
import time
import random
from datetime import datetime
from typing import List, Optional, Tuple, Dict, Iterable

from urllib.parse import urlparse

from bs4 import BeautifulSoup
from readability import Document
from playwright.sync_api import sync_playwright, BrowserContext, Page


def read_urls(path: str) -> List[str]:
    """Read a links file and return the absolute http(s) URLs it contains.

    One URL per line; blank lines and lines without an http/https scheme
    are skipped. Raises FileNotFoundError when the file does not exist.
    """
    if not os.path.exists(path):
        raise FileNotFoundError(f"Links file not found: {path}")
    with open(path, "r", encoding="utf-8", errors="ignore") as fh:
        stripped = (raw.strip() for raw in fh)
        return [u for u in stripped if u.startswith(("http://", "https://"))]


def sanitize_filename(url: str) -> str:
    """Build a filesystem-safe base name for *url*.

    Joins domain and path (slashes become underscores), replaces any other
    unsafe characters with underscores, and appends a short md5 digest of
    the full URL so distinct URLs never share a name.
    """
    parts = urlparse(url)
    raw = (parts.netloc or "no-domain") + (parts.path or "")
    stem = re.sub(r"[^A-Za-z0-9_.-]", "_", raw.strip("/").replace("/", "_")) or "index"
    digest = hashlib.md5(url.encode("utf-8")).hexdigest()[:8]
    return f"{stem}__{digest}"


def _text_from_html(html: str) -> str:
    """Strip boilerplate tags from *html* and return the visible text,
    one non-empty, whitespace-trimmed line per fragment."""
    soup = BeautifulSoup(html, "lxml")
    for junk in soup(["script", "style", "noscript", "nav", "aside", "footer", "form"]):
        junk.decompose()
    raw_lines = soup.get_text("\n").splitlines()
    return "\n".join(s for s in (ln.strip() for ln in raw_lines) if s)


def extract_meta(full_html: str) -> Dict[str, Optional[str]]:
    """Pull author / published / description / site-name from <meta> tags.

    Each field is looked up under several common name/property variants,
    in order; a field is None when no matching tag has a non-empty
    content attribute.
    """
    soup = BeautifulSoup(full_html, "lxml")

    def first_meta(*keys) -> Optional[str]:
        # A key may appear as either name= or property=; check both.
        for key in keys:
            tag = soup.find("meta", attrs={"name": key}) or soup.find("meta", attrs={"property": key})
            if tag and tag.get("content"):
                return tag.get("content").strip()
        return None

    return {
        "author": first_meta("author", "article:author", "byline", "dc.creator"),
        "published": first_meta(
            "article:published_time",
            "pubdate",
            "publish_date",
            "date",
            "og:updated_time",
            "article:modified_time",
        ),
        "description": first_meta("description", "og:description"),
        "site": first_meta("og:site_name"),
    }


def extract_article(full_html: str, fallback_title: Optional[str] = None) -> Tuple[str, str, str]:
    """Extract (title, content_html, content_text) from a full page's HTML.

    First tries readability's Document; when that raises or yields no
    text, falls back to the <article>/<main> element, then to the largest
    <div>/<section> by text length, and finally to the whole document.
    """
    try:
        doc = Document(full_html)
        best_title = doc.short_title() or doc.title() or fallback_title or ""
        body_html = doc.summary()
        body_text = _text_from_html(body_html)
        if body_text.strip():
            return best_title.strip(), body_html, body_text
    except Exception:
        # readability can choke on odd markup; fall through to heuristics.
        pass

    soup = BeautifulSoup(full_html, "lxml")
    node = soup.find("article") or soup.find("main")
    if not node:
        boxes = soup.find_all(["div", "section"])
        node = max(boxes, key=lambda el: len(el.get_text("\n")), default=soup.body)
    body_html = str(node) if node else full_html
    body_text = _text_from_html(body_html)
    page_title = soup.title.get_text(strip=True) if soup.title else ""
    best_title = fallback_title or page_title
    return best_title.strip(), body_html, body_text


def _pick_browser(play, browser: str):
    b = (browser or "chromium").lower()
    if b in ("chrome", "edge", "chromium"):
        return play.chromium
    if b in ("firefox", "ff"):
        return play.firefox
    if b in ("webkit", "safari"):
        return play.webkit
    return play.chromium


def _random_user_agent() -> str:
    uas = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:126.0) Gecko/20100101 Firefox/126.0",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.2 Safari/605.1.15",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36",
    ]
    return random.choice(uas)


def _random_viewport() -> Dict[str, int]:
    sizes = [(1366, 768), (1440, 900), (1536, 864), (1600, 900), (1920, 1080)]
    w, h = random.choice(sizes)
    w += random.randint(-10, 10)
    h += random.randint(-10, 10)
    return {"width": max(1024, w), "height": max(700, h)}


def create_context(
    play,
    browser: str,
    user_data_dir: Optional[str],
    headless: bool,
    user_agent: Optional[str] = None,
    locale: Optional[str] = None,
    timezone_id: Optional[str] = None,
) -> BrowserContext:
    """Create a browser context; persistent when *user_data_dir* is given.

    Either way the context gets a randomized viewport and user agent
    (unless explicitly supplied), zh-CN / Asia/Shanghai defaults, and
    downloads disabled.
    """
    browser_type = _pick_browser(play, browser)
    shared_options = dict(
        viewport=_random_viewport(),
        user_agent=user_agent or _random_user_agent(),
        locale=locale or "zh-CN",
        timezone_id=timezone_id or "Asia/Shanghai",
        accept_downloads=False,
    )
    if user_data_dir:
        os.makedirs(user_data_dir, exist_ok=True)
        return browser_type.launch_persistent_context(
            user_data_dir=user_data_dir,
            headless=headless,
            **shared_options,
        )
    return browser_type.launch(headless=headless).new_context(**shared_options)


def wait_for_ready(page: Page, timeout: int, wait_until: str):
    """Block until the page reaches the load state implied by *wait_until*
    ("normal" -> load, "eager" -> domcontentloaded, "none" -> commit),
    then best-effort wait for <body> to be attached."""
    state = {"normal": "load", "eager": "domcontentloaded", "none": "commit"}.get(wait_until, "load")
    page.wait_for_load_state(state, timeout=max(1, timeout) * 1000)
    try:
        # Secondary wait scaled with timeout but bounded to 1-5 seconds.
        body_ms = min(5000, max(1000, timeout * 500))
        page.wait_for_selector("body", state="attached", timeout=body_ms)
    except Exception:
        pass  # the body wait is advisory only


def auto_scroll_to_bottom(page: Page, max_time: float, pause: float, max_steps: int):
    """Repeatedly scroll to the page bottom to trigger lazy-loaded content.

    Stops when the document height stops growing (after one small
    jiggle-and-recheck to give late loaders a chance), or once *max_time*
    seconds or *max_steps* iterations are exceeded. Any page error aborts
    silently: scrolling is best-effort.
    """
    import time as _t
    try:
        def doc_height():
            return page.evaluate(
                "() => Math.max(document.body.scrollHeight, document.documentElement.scrollHeight)"
            )

        prev_height = doc_height()
        started_at = _t.time()
        iterations = 0
        while True:
            page.evaluate("window.scrollTo(0, document.body.scrollHeight);")
            _t.sleep(max(0.05, pause))
            current = doc_height()
            iterations += 1
            if current <= prev_height:
                # Height stalled: nudge the viewport and re-measure once
                # before concluding the page is fully loaded.
                page.evaluate("window.scrollBy(0, -200);")
                _t.sleep(0.1)
                page.evaluate("window.scrollBy(0, 200);")
                _t.sleep(max(0.05, pause))
                rechecked = doc_height()
                if rechecked <= prev_height:
                    break
                prev_height = rechecked
            else:
                prev_height = current
            if (_t.time() - started_at) >= max_time or iterations >= max_steps:
                break
    except Exception:
        pass


def _chunked(iterable: List[str], n_chunks: int) -> List[List[str]]:
    if n_chunks <= 1:
        return [list(iterable)]
    chunks = [[] for _ in range(n_chunks)]
    for i, item in enumerate(iterable):
        chunks[i % n_chunks].append(item)
    return chunks


def _ensure_unique_profile_root(user_data_dir: Optional[str], run_tag: str) -> str:
    base = user_data_dir or os.path.join("runs", run_tag, "profiles")
    os.makedirs(base, exist_ok=True)
    return base


def _write_article_file(
    out_dir: str,
    url: str,
    title: str,
    meta: Dict[str, Optional[str]],
    content_text: str,
    out_format: str,
) -> str:
    """Write one extracted article into *out_dir* as .md or .txt; return its path.

    Both formats carry the same header fields (source, site, author,
    published date, fetch timestamp); markdown adds the "# " title and
    "- " list prefixes. Previously this logic was duplicated per format.
    """
    base_name = sanitize_filename(url)
    is_md = out_format == "md"
    out_path = os.path.join(out_dir, base_name + (".md" if is_md else ".txt"))
    prefix = "- " if is_md else ""
    with open(out_path, "w", encoding="utf-8", errors="replace") as f:
        if title:
            f.write(f"# {title}\n\n" if is_md else title + "\n\n")
        f.write(f"{prefix}Source: {url}\n")
        if meta.get("site"):
            f.write(f"{prefix}Site: {meta['site']}\n")
        if meta.get("author"):
            f.write(f"{prefix}Author: {meta['author']}\n")
        if meta.get("published"):
            f.write(f"{prefix}Published: {meta['published']}\n")
        f.write(f"{prefix}Fetched: {datetime.now().isoformat()}\n\n")
        f.write(content_text.strip() + "\n")
    return out_path


def _worker(
    worker_id: int,
    urls: Iterable[str],
    out_dir: str,
    browser: str,
    timeout: int,
    settle: float,
    goto_wait: str,
    profile_root: str,
    fail_log: Optional[str],
    out_format: str,
    raw_html_out: Optional[str],
    scroll_max_time: float,
    scroll_pause: float,
    scroll_max_steps: int,
    disable_scroll: bool,
    headless: bool,
) -> int:
    """Fetch every URL in *urls* with a dedicated browser profile and save
    each extracted article; return the number of successful saves.

    Per-URL failures are printed (and appended to *fail_log* when set)
    but never abort the batch. A short sleep between URLs throttles the
    request rate.
    """
    success = 0
    # One profile dir per worker so parallel browsers don't clash on state.
    profile_dir = os.path.join(profile_root, f"worker_{worker_id}")
    with sync_playwright() as pw:
        ctx = create_context(
            pw, browser=browser, user_data_dir=profile_dir, headless=headless
        )
        # Persistent contexts open with a page already attached; reuse it.
        page = ctx.pages[0] if ctx.pages else ctx.new_page()
        for url in urls:
            try:
                page.goto(
                    url,
                    wait_until={"normal": "load", "eager": "domcontentloaded", "none": "commit"}.get(goto_wait, "load"),
                    timeout=max(1, timeout) * 1000,
                )
                wait_for_ready(page, timeout, goto_wait)
                if settle > 0:
                    time.sleep(settle)
                if not disable_scroll:
                    auto_scroll_to_bottom(page, max_time=scroll_max_time, pause=scroll_pause, max_steps=scroll_max_steps)
                    if settle > 0:
                        time.sleep(min(1.0, settle))

                html = page.content()
                if raw_html_out:
                    # Optionally keep the fully rendered HTML alongside the article.
                    raw_name = sanitize_filename(url) + ".html"
                    with open(os.path.join(raw_html_out, raw_name), "w", encoding="utf-8", errors="replace") as rf:
                        rf.write(html)

                meta = extract_meta(html)
                try:
                    title_text = page.title()
                except Exception:
                    title_text = ""
                title, content_html, content_text = extract_article(html, fallback_title=title_text)

                out_path = _write_article_file(out_dir, url, title, meta, content_text, out_format)
                success += 1
                print(f"    工作者#{worker_id} 已保存 -> {out_path}")
            except Exception as e:
                print(f"    工作者#{worker_id} 失败: {e}")
                if fail_log:
                    try:
                        with open(fail_log, "a", encoding="utf-8") as lf:
                            lf.write(f"{datetime.now().isoformat()}\t{url}\t{repr(e)}\n")
                    except Exception:
                        pass  # failure logging is best-effort
            time.sleep(0.1)
        ctx.close()
    return success


def crawl(
    links_file: str,
    out_dir: str,
    browser: str = "chromium",
    timeout: int = 60,
    settle: float = 1.0,
    start: Optional[int] = None,
    end: Optional[int] = None,
    page_load_strategy: str = "normal",
    user_data_dir: Optional[str] = None,
    fail_log: Optional[str] = None,
    out_format: str = "md",
    raw_html_out: Optional[str] = None,
    scroll_max_time: float = 20.0,
    scroll_pause: float = 0.4,
    scroll_max_steps: int = 120,
    disable_scroll: bool = False,
    workers: int = 1,
    headless: bool = False,
):
    """Top-level driver: read the links file, slice to the requested
    1-based [start, end] range, and fan the URLs out round-robin across
    *workers* browser workers.

    Creates the output (and optional raw-HTML) directories, truncates the
    fail log, and prints a summary when finished.
    """
    all_urls = read_urls(links_file)
    total = len(all_urls)
    if not all_urls:
        print("未从链接文件中读取到有效的 http(s) 链接")
        return

    lo = max(0, start - 1) if start is not None else 0
    hi = min(total, end) if end is not None else total
    selected = all_urls[lo:hi]

    os.makedirs(out_dir, exist_ok=True)
    if raw_html_out:
        os.makedirs(raw_html_out, exist_ok=True)
    if fail_log:
        # Truncate any previous log; disable logging if the path is unwritable.
        try:
            with open(fail_log, "w", encoding="utf-8") as _:
                pass
        except Exception:
            fail_log = None

    print(f"总链接数: {total}，本次处理区间: {lo+1}-{lo+len(selected)}，输出文件夹: {out_dir}")

    # Unique tag per run keeps profile directories from colliding across runs.
    run_tag = datetime.now().strftime("%Y%m%d_%H%M%S_") + os.urandom(3).hex()
    profile_root = _ensure_unique_profile_root(user_data_dir, run_tag)

    n_workers = max(1, workers)
    splits = _chunked(selected, n_workers)

    total_success = 0
    if n_workers == 1:
        # Single-worker path avoids the thread pool entirely.
        total_success = _worker(
            1, splits[0], out_dir, browser, timeout, settle, page_load_strategy,
            profile_root, fail_log, out_format, raw_html_out, scroll_max_time,
            scroll_pause, scroll_max_steps, disable_scroll, headless
        )
    else:
        from concurrent.futures import ThreadPoolExecutor, as_completed
        with ThreadPoolExecutor(max_workers=n_workers) as pool:
            futures = [
                pool.submit(
                    _worker, idx, part, out_dir, browser, timeout, settle,
                    page_load_strategy, profile_root, fail_log, out_format,
                    raw_html_out, scroll_max_time, scroll_pause, scroll_max_steps,
                    disable_scroll, headless
                )
                for idx, part in enumerate(splits, start=1)
                if part
            ]
            for fut in as_completed(futures):
                try:
                    total_success += fut.result()
                except Exception:
                    pass  # a crashed worker simply contributes no successes

    print(f"完成。成功 {total_success}，失败 {len(selected) - total_success}，输出目录: {out_dir}")


def main():
    """CLI entry point: parse command-line arguments and run the crawler.

    Exits with status 1 on unexpected errors; Ctrl-C prints a short
    notice. Newly exposed scroll-tuning flags default to the same values
    crawl() already used, so existing invocations behave identically.
    """
    parser = argparse.ArgumentParser(
        description=(
            "使用 Playwright 批量抓取网页文章内容。默认读取 data_prepared_/links.txt\n"
            "提取标题/作者/发布时间与正文，保存为 .md 或 .txt"
        )
    )
    parser.add_argument(
        "-i",
        "--input",
        default=os.path.join("data_prepared_", "links.txt"),
        help="链接文件路径 (每行一个URL, 仅支持 http/https)",
    )
    parser.add_argument(
        "-o",
        "--out",
        default="articles",
        help="文章输出目录",
    )
    parser.add_argument(
        "-b",
        "--browser",
        choices=["chromium", "firefox", "webkit", "chrome", "edge"],
        default="chromium",
        help="选择浏览器类型 (chromium/firefox/webkit)",
    )
    parser.add_argument(
        "-t",
        "--timeout",
        type=int,
        default=10,
        help="单页加载超时时间(秒)",
    )
    parser.add_argument(
        "--settle",
        type=float,
        default=1,
        help="页面加载完成后额外等待(秒)，利于动态内容渲染",
    )
    parser.add_argument(
        "--start",
        type=int,
        help="从第几个链接开始(1-based, 可选)",
    )
    parser.add_argument(
        "--end",
        type=int,
        help="到第几个链接结束(1-based, 可选)",
    )
    parser.add_argument(
        "--page-load-strategy",
        choices=["normal", "eager", "none"],
        default="normal",
        help="页面等待策略: normal(load)/eager(DOMContentLoaded)/none(commit)",
    )
    parser.add_argument(
        "--user-data-dir",
        help="浏览器用户数据根目录; 会在其下为每个线程创建独立 profile",
    )
    parser.add_argument(
        "--fail-log",
        default="crawl_errors.log",
        help="失败日志文件路径",
    )
    parser.add_argument(
        "--format",
        choices=["md", "txt"],
        default="md",
        help="文章输出格式 (md/txt)",
    )
    parser.add_argument(
        "--raw-html-out",
        help="同时保存原始HTML到该目录(可选)",
    )
    # Scroll tuning: crawl() already accepts these, so expose them on the CLI.
    parser.add_argument(
        "--scroll-max-time",
        type=float,
        default=20.0,
        help="自动滚动的最长持续时间(秒)",
    )
    parser.add_argument(
        "--scroll-pause",
        type=float,
        default=0.4,
        help="每次滚动之间的停顿(秒)",
    )
    parser.add_argument(
        "--scroll-max-steps",
        type=int,
        default=120,
        help="自动滚动的最大步数",
    )
    parser.add_argument(
        "--no-scroll",
        action="store_true",
        help="禁用自动滚动到页面底部",
    )
    parser.add_argument(
        "-w",
        "--workers",
        type=int,
        default=2,
        help="并发线程数 (每线程独立浏览器环境)",
    )
    parser.add_argument(
        "--headless",
        action="store_true",
        help="使用无头模式(可能更容易被检测; 默认关闭)",
    )

    args = parser.parse_args()

    try:
        crawl(
            links_file=args.input,
            out_dir=args.out,
            browser=args.browser,
            timeout=args.timeout,
            settle=args.settle,
            start=args.start,
            end=args.end,
            page_load_strategy=args.page_load_strategy,
            user_data_dir=args.user_data_dir,
            fail_log=args.fail_log,
            out_format=args.format,
            raw_html_out=args.raw_html_out,
            scroll_max_time=args.scroll_max_time,
            scroll_pause=args.scroll_pause,
            scroll_max_steps=args.scroll_max_steps,
            disable_scroll=args.no_scroll,
            workers=args.workers,
            headless=args.headless,
        )
    except KeyboardInterrupt:
        print("已中断")
    except Exception as e:
        print(f"运行失败: {e}")
        sys.exit(1)


# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()

