#!/usr/bin/env python3
"""convert_article.py

将保存的微信文章 HTML 提取正文并保存为 Markdown，同时可将条目追加到 CSV 汇总文件。

用法示例：
  # 处理单个 HTML 文件并输出到 out_md/ 目录，追加 CSV
  python3 scripts/convert_article.py --html out/导语/导语.html --out-md-dir out_md --csv out/articles.csv

  # 处理整个 out/ 目录下的 html 文件
  python3 scripts/convert_article.py --html out --out-md-dir out_md --csv out/articles.csv

"""
import argparse
import csv
import os
from pathlib import Path
from bs4 import BeautifulSoup
import re
import html


def safe_name(s: str) -> str:
    """Sanitize *s* for use as a file or directory name.

    Drops characters that are illegal in Windows/POSIX file names,
    trims surrounding whitespace and flattens embedded newlines to
    single spaces. ``None`` becomes the empty string.
    """
    if s is None:
        return ''
    cleaned = str(s).translate(str.maketrans('', '', '\\/:*?"<>|'))
    return cleaned.strip().replace('\n', ' ')


def extract_js_content(soup: BeautifulSoup):
    """Return the node that holds the article body.

    Tries the selectors WeChat pages normally use, most specific first,
    and falls back to <body> (or the whole document) when none match.
    """
    # '#js_content' is the standard WeChat body container;
    # '.rich_media_content' is an older/alternate class name.
    for selector in ('#js_content', '.rich_media_content'):
        hit = soup.select_one(selector)
        if hit:
            return hit
    return soup.body or soup


def html_to_markdown(node: BeautifulSoup) -> str:
    """Convert an article content node to plain Markdown text.

    Emits one Markdown line per block element (headings, paragraphs,
    blockquotes, pre blocks), separated by blank lines, with HTML
    entities unescaped.

    Fixes over the original version:
      * Container <div>s that hold nested block elements are skipped —
        previously their text was emitted once for the div and again
        for each nested <p>/<h*>, duplicating every paragraph in the
        (heavily div-nested) WeChat markup.
      * Heading levels are preserved (h1 -> #, h2 -> ##, h3 -> ###)
        instead of flattening everything to '##'.
    """
    block_tags = ['h1', 'h2', 'h3', 'p', 'blockquote', 'pre', 'div']
    parts = []
    for el in node.find_all(block_tags):
        name = el.name.lower()
        # find_all also visits this div's block children, so taking the
        # div's own text would duplicate their content.
        if name == 'div' and el.find(block_tags) is not None:
            continue
        text = el.get_text(separator=' ', strip=True)
        if not text:
            continue
        if name.startswith('h'):
            # 'h1'/'h2'/'h3' only (limited by block_tags above).
            parts.append('\n' + '#' * int(name[1]) + ' ' + text + '\n')
        elif name == 'blockquote':
            parts.append('> ' + text + '\n')
        else:
            parts.append(text + '\n')
    # Unescape entities (&amp; etc.) left in the extracted text.
    return html.unescape('\n'.join(parts)).strip()


def parse_article(html_path: Path):
    """Extract title, account, date and Markdown body from a saved article.

    Returns a dict with keys ``title``, ``account``, ``date``
    (``YYYY-MM-DD`` or ``''``), ``markdown`` and ``excerpt`` (first 300
    characters with newlines flattened). Falls back to the file stem for
    the title and the parent directory name for the account when the
    page carries no usable values.
    """
    raw = html_path.read_text(encoding='utf-8', errors='ignore')
    soup = BeautifulSoup(raw, 'lxml')

    # Title: WeChat article pages typically put it in the first <h2>.
    title = ''
    try:
        heading = soup.select_one('h2')
        if heading is not None:
            title = heading.get_text(strip=True)
    except Exception:
        title = ''
    if not title:
        # Fall back to the document <title> tag (may be absent or empty).
        title = (soup.title.string if soup.title else '') or ''

    # Account name: pages embed a `nickname = "..."` JS assignment.
    account = ''
    try:
        found = re.search(r'nickname = "([^"]+)"', raw)
        if found:
            account = found.group(1) or ''
    except Exception:
        account = ''

    # Publish date: the `ct = "<unix timestamp>"` JS assignment, if present.
    date = ''
    try:
        found = re.search(r'ct = "(\d+)"', raw)
        if found:
            import time

            parsed = time.localtime(int(found.group(1)))
            date = f"{parsed.tm_year}-{parsed.tm_mon:02d}-{parsed.tm_mday:02d}"
    except Exception:
        date = ''

    markdown = html_to_markdown(extract_js_content(soup))
    return {
        'title': safe_name(title) or html_path.stem,
        'account': safe_name(account) or html_path.parent.name,
        'date': date,
        'markdown': markdown,
        'excerpt': markdown[:300].replace('\n', ' ') if markdown else '',
    }


def ensure_dir(p: Path):
    """Create directory *p* (and any missing parents); no-op if it exists."""
    os.makedirs(p, exist_ok=True)


def main():
    """CLI entry point.

    Converts one HTML file, or every ``*.html`` under a directory, to
    Markdown files grouped by account under ``--out-md-dir``, and
    optionally appends one summary row per article to ``--csv``.

    Fix over the original version: the CSV file handle is now closed via
    ``try/finally`` — previously an exception raised while parsing or
    writing any article leaked the open handle (and could lose buffered
    rows). Directory scans are also sorted for deterministic output order.
    """
    p = argparse.ArgumentParser()
    p.add_argument('--html', required=True, help='HTML file path or directory')
    p.add_argument('--out-md-dir', default='out_md', help='输出 markdown 的目录')
    p.add_argument('--csv', default=None, help='追加的 CSV 文件路径（可选）')

    args = p.parse_args()
    html_path = Path(args.html)
    out_md_dir = Path(args.out_md_dir)
    ensure_dir(out_md_dir)

    # Collect the inputs; sort directory scans for deterministic order.
    if html_path.is_dir():
        html_files = sorted(html_path.rglob('*.html'))
    elif html_path.is_file():
        html_files = [html_path]
    else:
        print('html path not found', html_path)
        return

    csvf = None
    writer = None
    try:
        if args.csv:
            csv_file = Path(args.csv)
            ensure_dir(csv_file.parent)
            # Write the header only when creating a brand-new CSV.
            need_header = not csv_file.exists()
            csvf = open(csv_file, 'a', newline='', encoding='utf-8')
            writer = csv.writer(csvf)
            if need_header:
                writer.writerow(['file', 'title', 'account', 'date', 'markdown_path', 'excerpt'])

        for hf in html_files:
            data = parse_article(hf)
            # One sub-directory per account, one .md file per article.
            acct_dir = out_md_dir / data['account']
            ensure_dir(acct_dir)
            md_path = acct_dir / (data['title'] + '.md')
            with open(md_path, 'w', encoding='utf-8') as f:
                f.write(f"# {data['title']}\n\n")
                if data['date']:
                    f.write(f"**日期:** {data['date']}\n\n")
                f.write(data['markdown'])

            if writer:
                writer.writerow([str(hf), data['title'], data['account'],
                                 data['date'], str(md_path), data['excerpt']])
    finally:
        if csvf is not None:
            csvf.close()


if __name__ == '__main__':
    # Run the converter only when executed as a script, not on import.
    main()
