#!/usr/bin/env python3
"""fetch_article.py

用法示例：
    # 只保存 HTML（不需要认证）
    python3 scripts/fetch_article.py --url "<article_url>" --out-dir out

    # 保存 HTML 并获取阅读/点赞/评论（需要 appmsg_token 与 cookie）
    python3 scripts/fetch_article.py --url "<article_url>" --out-dir out --appmsg-token <token> --cookie "<cookie>"

脚本功能：
- 下载文章 HTML
- 用 wechatarticles.Url2Html 替换/下载图片到 <out-dir>/<account>/imgs
- 可选：使用 wechatarticles.ArticlesInfo 获取阅读/点赞/评论并将结果写入 JSON

"""

import argparse
import os
import sys
import time
import json
from pathlib import Path

import requests

try:
    from wechatarticles import Url2Html, ArticlesInfo
except Exception:
    Url2Html = None
    ArticlesInfo = None


def fetch_html(url, proxies=None, timeout=30):
    """Download the raw HTML of *url* and return it as text.

    Args:
        url: Full article URL to fetch.
        proxies: Optional requests-style proxies mapping, e.g.
            ``{"http": "...", "https": "..."}``.
        timeout: Request timeout in seconds (default 30; previously
            hard-coded).

    Returns:
        The response body decoded as text (``requests`` picks the charset).

    Raises:
        requests.HTTPError: If the server responds with a 4xx/5xx status.
        requests.RequestException: On connection/timeout failures.
    """
    # A desktop-browser UA avoids anti-bot / mobile variants some servers
    # serve to the default python-requests User-Agent.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100 Safari/537.36"
    }
    resp = requests.get(url, headers=headers, proxies=proxies, timeout=timeout)
    resp.raise_for_status()
    return resp.text


def save_html_and_imgs(html, out_dir, img_path=None, proxies=None):
    """Save article HTML (with images localized) under an account subfolder.

    Uses ``wechatarticles.Url2Html`` to rewrite <img> references in *html*
    and download the images, then writes the rewritten HTML and an image
    manifest JSON to ``<out_dir>/<account>/``.

    Args:
        html: Raw article HTML as returned by :func:`fetch_html`.
        out_dir: Base output directory; an account-named subfolder is
            created beneath it.
        img_path: Optional base directory for downloaded images; defaults
            to the resolved *out_dir*.
        proxies: Optional requests-style proxies mapping passed through to
            ``Url2Html``.

    Returns:
        dict with keys ``title``, ``safe_title``, ``account``,
        ``html_path`` and ``imgs`` (list of image URLs).

    Raises:
        RuntimeError: If the ``wechatarticles`` package is not importable.
    """
    if Url2Html is None:
        raise RuntimeError("wechatarticles 包未安装或无法导入 Url2Html")

    u = Url2Html(img_path=img_path)
    # Explicit None-proxies disable any environment proxy settings when the
    # caller supplied none — presumably how Url2Html's requests are routed;
    # TODO(review): confirm Url2Html honors a `proxies` attribute.
    u.proxies = proxies or {"http": None, "https": None}

    # determine a safe account directory name and set u.account to an absolute path
    # get a title-based fallback
    # NOTE: get_title/replace_name are invoked on the class, not the
    # instance — assumes they behave as static/unbound helpers in
    # wechatarticles; verify against the installed package version.
    try:
        title_guess = Url2Html.get_title(html) or "article"
    except Exception:
        title_guess = "article"
    safe_title = Url2Html.replace_name(title_guess)

    # try to extract account name from the html; fallback to title
    # article_info(html)[0] is presumably the official-account name —
    # TODO confirm the tuple layout against wechatarticles docs.
    try:
        acct = u.article_info(html)[0]
        acct_safe = Url2Html.replace_name(acct) or safe_title
    except Exception:
        acct_safe = safe_title

    # img_path should be base directory where account subfolders are created
    base_img_dir = img_path or str(Path(out_dir).resolve())
    # u.account doubles as the image target directory inside Url2Html.
    account_abs_dir = str(Path(base_img_dir) / acct_safe)
    u.account = account_abs_dir

    try:
        new_html, imgs = u.replace_img(html)
    except Exception as e:
        # If image download/processing fails, fallback to saving original HTML without images
        print(f"Warning: replace_img failed, saving raw HTML. Error: {e}")
        new_html = html
        imgs = []

    # Re-read the title from the rewritten HTML; fall back to the guess.
    title = Url2Html.get_title(new_html) or title_guess

    # target dir for saving html should be the account folder under out_dir
    account_name = acct_safe
    target_dir = Path(out_dir) / account_name
    target_dir.mkdir(parents=True, exist_ok=True)

    html_path = target_dir / f"{safe_title}.html"
    with open(html_path, "w", encoding="utf-8") as f:
        f.write(new_html)

    # save imgs info
    # Each entry of `imgs` is indexed at [0] — presumably (url, ...) tuples
    # from replace_img; confirm against wechatarticles.
    imgs_info_path = target_dir / f"{safe_title}_imgs.json"
    with open(imgs_info_path, "w", encoding="utf-8") as f:
        json.dump([i[0] for i in imgs], f, ensure_ascii=False, indent=2)

    return {
        "title": title,
        "safe_title": safe_title,
        "account": account_name,
        "html_path": str(html_path),
        "imgs": [i[0] for i in imgs],
    }


def fetch_article(url, out_dir, appmsg_token=None, cookie=None, proxies=None):
    """Download an article, persist HTML/images, optionally collect stats.

    Args:
        url: Full article URL.
        out_dir: Base output directory for HTML and images.
        appmsg_token: Optional token; together with *cookie* enables
            fetching read/like counts and comments.
        cookie: Optional WeChat cookie string.
        proxies: Optional requests-style proxies mapping.

    Returns:
        dict describing the saved article; when credentials are given it
        also carries ``read_num``/``like_num``/``old_like``/``comments``,
        or ``read_error`` if stat collection failed.

    Raises:
        RuntimeError: If stats were requested but ``ArticlesInfo`` is
            unavailable.
    """
    raw_html = fetch_html(url, proxies=proxies)

    img_base = str(Path(out_dir).resolve())
    result = save_html_and_imgs(raw_html, out_dir, img_path=img_base, proxies=proxies)

    # Read/like/comment stats need both credentials; otherwise we're done.
    if not (appmsg_token and cookie):
        return result

    if ArticlesInfo is None:
        raise RuntimeError("wechatarticles 包未安装或无法导入 ArticlesInfo")

    info_client = ArticlesInfo(appmsg_token, cookie)
    try:
        read_num, like_num, old_like = info_client.read_like_nums(url)
        article_comments = info_client.comments(url)
    except Exception as e:
        # Best-effort: record the failure instead of aborting the whole run.
        result.update({"read_error": str(e)})
    else:
        # Only merged when BOTH calls succeeded (all-or-nothing).
        result.update(
            {
                "read_num": read_num,
                "like_num": like_num,
                "old_like": old_like,
                "comments": article_comments,
            }
        )

    return result


def main():
    """CLI entry point: parse arguments, fetch the article, print JSON."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--url", required=True, help="微信公众号文章完整 URL")
    parser.add_argument("--out-dir", default="out", help="输出目录")
    parser.add_argument("--appmsg-token", dest="appmsg_token", help="appmsg_token（可选，用于获取阅读/点赞/评论）")
    parser.add_argument("--cookie", help="wechat cookie（可选，用于获取阅读/点赞/评论）")
    parser.add_argument("--proxy", help="http proxy，例如 http://127.0.0.1:8080")
    args = parser.parse_args()

    # Route both HTTP and HTTPS traffic through the same proxy, if any.
    proxies = {"http": args.proxy, "https": args.proxy} if args.proxy else None

    try:
        outcome = fetch_article(
            args.url,
            args.out_dir,
            appmsg_token=args.appmsg_token,
            cookie=args.cookie,
            proxies=proxies,
        )
    except Exception as e:
        # Report the failure on stderr and signal a non-zero exit status.
        print("ERROR:", e, file=sys.stderr)
        sys.exit(1)
    else:
        print(json.dumps(outcome, ensure_ascii=False, indent=2))


# Run the CLI only when executed as a script (not on import).
if __name__ == "__main__":
    main()
