#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
AMOS Technical Papers scraper
- Crawls a given year page like https://amostech.space/amos/2025/
- Visits each paper page under /year/<year>/...
- Extracts: year, title, authors, keywords, track, paper_url, pdf_url
- Downloads PDFs to data/papers/<year>/<slug>.pdf
- Writes CSV metadata to data/metadata_<year>.csv

Usage examples:
  python src/amos_scraper.py --years 2025 --out data
  python src/amos_scraper.py --years 2023-2025 --max-workers 8
  python src/amos_scraper.py --years 2025 --dry-run
"""

from __future__ import annotations

import argparse
import csv
import os
import re
import sys
import time
import concurrent.futures as cf
from dataclasses import dataclass, asdict
from typing import Iterable, List, Optional, Set, Tuple

import requests
from bs4 import BeautifulSoup, NavigableString, Tag
from tqdm import tqdm
from urllib.parse import urljoin, urlparse
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

# Year index page that lists links to every paper page for that year.
BASE_INDEX_FMT = "https://amostech.space/amos/{year}/"
# Individual paper pages live under this URL prefix (see collect_paper_links).
PAPER_PAGE_PREFIX_FMT = "https://amostech.space/year/{year}/"


@dataclass
class PaperMeta:
    """One scraped paper: metadata fields plus local download state.

    Field order matches the CSV column order written by
    write_csv_header/append_csv_row.
    """

    year: int       # conference year; 0 when it can't be derived from URL or hint
    title: str      # paper title taken from the page heading (or <title> tag)
    authors: str    # best-effort free-text author line; may be ""
    keywords: str   # value of the page's "Keywords:" field; "" when absent
    track: str      # value of the page's "Track:" field; "" when absent
    paper_url: str  # URL of the paper's landing page
    pdf_url: str    # PDF link found on the page; "" when none found
    pdf_path: str   # local path after a successful download; "" otherwise


def get_session(timeout: float = 30.0) -> requests.Session:
    """Build a requests.Session with retries and a default timeout.

    Args:
        timeout: fallback timeout (seconds) applied to every request that
            does not pass its own ``timeout=`` keyword.

    Returns:
        A session with exponential-backoff retries on transient HTTP
        errors, enlarged connection pools, and scraper-identifying headers.
    """
    session = requests.Session()
    retry_policy = Retry(
        total=5,
        backoff_factor=0.5,
        status_forcelist=(429, 500, 502, 503, 504),
        allowed_methods=("GET", "HEAD"),
        raise_on_status=False,
    )
    pooled_adapter = HTTPAdapter(
        max_retries=retry_policy, pool_connections=50, pool_maxsize=50
    )
    for scheme in ("http://", "https://"):
        session.mount(scheme, pooled_adapter)
    session.headers["User-Agent"] = (
        "Mozilla/5.0 (compatible; AMOS-Scraper/1.0; +https://amostech.space/)"
    )
    session.headers["Accept-Language"] = "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7"
    # Monkey-patch request() so every call inherits the default timeout.
    session.request = _wrap_timeout(session.request, timeout)
    return session


def _wrap_timeout(request_func, timeout_default: float):
    def _wrapped(method, url, **kwargs):
        if "timeout" not in kwargs:
            kwargs["timeout"] = timeout_default
        return request_func(method, url, **kwargs)
    return _wrapped


def slugify(text: str) -> str:
    """Turn *text* into a filesystem-safe slug.

    Lowercases, collapses every run of non-alphanumeric characters into a
    single hyphen, and trims edge hyphens. Returns "paper" when nothing
    survives (empty or all-punctuation input).
    """
    lowered = text.strip().lower()
    # A single pass suffices: the '+' quantifier already collapses runs.
    slug = re.sub(r"[^a-z0-9]+", "-", lowered).strip("-")
    return slug if slug else "paper"


def ensure_dir(path: str) -> None:
    """Create *path* (including parents) if it does not already exist.

    A falsy *path* is a no-op: callers pass ``os.path.dirname(dest)``,
    which is "" for a bare filename, and ``os.makedirs("")`` would raise
    FileNotFoundError.
    """
    if path:
        os.makedirs(path, exist_ok=True)


def parse_years_arg(arg: str) -> List[int]:
    """Parse a --years CLI value into a sorted list of unique years.

    Accepts single years, inclusive ranges, and comma-separated mixes,
    e.g. '2025', '2023-2025', '2019,2022,2025'. A reversed range like
    '2025-2023' is normalized.
    """
    result: Set[int] = set()
    for token in (piece.strip() for piece in arg.split(",")):
        if not token:
            continue
        if "-" in token:
            lo_s, hi_s = token.split("-", 1)
            lo, hi = int(lo_s), int(hi_s)
            if lo > hi:
                lo, hi = hi, lo
            result.update(range(lo, hi + 1))
        else:
            result.add(int(token))
    return sorted(result)


def collect_paper_links(session: requests.Session, year: int, delay: float = 0.0) -> List[str]:
    """Return the sorted, de-duplicated paper-page URLs for *year*.

    Fetches the year's index page and keeps every anchor whose href
    starts with the /year/<year>/ prefix.

    Args:
        session: configured HTTP session.
        year: conference year to scrape.
        delay: polite sleep (seconds) after the request.

    Raises:
        requests.HTTPError: when the index page returns a non-2xx status.
    """
    resp = session.get(BASE_INDEX_FMT.format(year=year))
    resp.raise_for_status()
    soup = BeautifulSoup(resp.text, "lxml")

    wanted_prefix = PAPER_PAGE_PREFIX_FMT.format(year=year)
    found: Set[str] = {
        anchor["href"].strip()
        for anchor in soup.find_all("a", href=True)
        if anchor["href"].strip().startswith(wanted_prefix)
    }
    # NOTE: the AMOS year page lists every paper alphabetically on one
    # page, so no pagination handling is attempted here.

    time.sleep(delay)
    return sorted(found)


def _first_text_line_after(h1: Tag) -> Optional[str]:
    """Best-effort author line: first meaningful text after the heading.

    Walks the siblings that follow *h1* and returns the first non-empty
    line of text. Bare text nodes that start with a labeled field
    ("Keywords:", "Abstract:", ...) are skipped; but when the first line
    inside a Tag sibling is such a label, None is returned — the page
    has no author line before its labeled fields.

    Returns:
        The candidate author line, or None when none is found.
    """
    for sib in h1.next_siblings:
        if isinstance(sib, NavigableString):
            txt = str(sib).strip()
            if txt:
                # Could be inline text, often empty though
                if not txt.lower().startswith(("keywords:", "abstract:", "date of conference:", "track:")):
                    return txt
        elif isinstance(sib, Tag):
            txt = sib.get_text(" ", strip=True)
            if not txt:
                continue
            # Split into lines to capture first logical line
            # Some posts may add multiple chunks before keywords.
            for line in re.split(r"[\r\n]+", txt):
                line = line.strip()
                if not line:
                    continue
                low = line.lower()
                # A label as the very first line means there is no author
                # line here at all, so give up rather than skip past it.
                if low.startswith("keywords:") or low.startswith("abstract:") or low.startswith("date of conference:") or low.startswith("track:"):
                    return None
                return line
    return None


def _extract_by_label(soup: BeautifulSoup, label: str) -> str:
    """Return the value following *label* (e.g. "Keywords:") on the page.

    First looks for a text node beginning with the label
    (case-insensitive); if the value wraps onto the next text node, the
    continuation is appended unless it starts another labeled field.
    Falls back to scanning the page's flattened text line by line.

    Returns:
        The extracted value, or "" when the label is absent.
    """
    pat = re.compile(rf"^{re.escape(label)}\s*(.*)$", re.IGNORECASE)
    # Fix: bs4 deprecated the `text` keyword in favor of `string`
    # (same behavior, no DeprecationWarning on modern versions).
    for el in soup.find_all(string=pat):
        m = pat.match(str(el))
        if m:
            val = m.group(1).strip()
            # Keywords sometimes wrap onto the next line; include the
            # immediate next text node if it doesn't start with a label.
            nxt = el.next_sibling
            if isinstance(nxt, NavigableString):
                extra = str(nxt).strip()
                if extra and not extra.lower().startswith(("track:", "abstract:", "date of conference:")):
                    val = (val + " " + extra).strip()
            return val
    # Fallback: search anywhere in the full page text.
    text = soup.get_text("\n")
    for line in text.splitlines():
        if line.strip().lower().startswith(label.lower()):
            return line.split(":", 1)[-1].strip()
    return ""


def _find_pdf_url(soup: BeautifulSoup, year: Optional[int] = None) -> Optional[str]:
    """Find the most plausible PDF link on a paper page.

    Preference order:
      1. an anchor labeled exactly "View Paper" whose href ends in .pdf;
      2. otherwise the highest-scoring .pdf link: +2 when the href
         contains the canonical "amostech.com/TechnicalPapers" path,
         +1 when it contains the target year.

    Args:
        year: conference year used in scoring. Defaults to 2025 to keep
            the original behavior for existing callers; pass the real
            year when scraping other editions.

    Returns:
        The chosen href (possibly relative), or None when no PDF link exists.
    """
    for a in soup.find_all("a", href=True):
        label = a.get_text(strip=True).lower()
        href = a["href"].strip()
        if label == "view paper" and href.lower().endswith(".pdf"):
            return href
    # Generalized: score by the requested year instead of hard-coding "2025".
    year_token = str(year) if year is not None else "2025"
    candidates: List[Tuple[int, str]] = []
    for a in soup.find_all("a", href=True):
        href = a["href"].strip()
        if not href.lower().endswith(".pdf"):
            continue
        score = 0
        if "amostech.com/TechnicalPapers" in href:
            score += 2
        if year_token in href:
            score += 1
        candidates.append((score, href))
    if candidates:
        # Ties break on reverse-lexicographic href, as before.
        candidates.sort(reverse=True)
        return candidates[0][1]
    return None


def parse_paper_page(session: requests.Session, url: str, year_hint: Optional[int] = None, delay: float = 0.0) -> PaperMeta:
    """Fetch a paper page and extract its metadata into a PaperMeta.

    Args:
        session: configured HTTP session.
        url: paper page URL, e.g. https://amostech.space/year/2025/<slug>/.
        year_hint: year to use when *url* lacks a /year/<yyyy>/ segment.
        delay: polite sleep (seconds) after the request.

    Raises:
        requests.HTTPError: on a non-2xx response.
    """
    r = session.get(url)
    r.raise_for_status()
    soup = BeautifulSoup(r.text, "lxml")

    # Title: first heading with direct text, else the <title> tag, else "".
    h1 = soup.find(["h1", "h2"], string=True)
    if h1 is not None:
        title = h1.get_text(strip=True)
    elif soup.title is not None:
        title = soup.title.get_text(strip=True)
    else:
        title = ""

    # Authors: best-effort — the first meaningful line after the heading
    # and before a labeled field such as 'Keywords:'.
    authors = ""
    if h1:
        authors = _first_text_line_after(h1) or ""
    if not authors:
        # Fallback: a line that looks like a list of names (commas/semicolons).
        text = soup.get_text("\n")
        for line in text.splitlines():
            line = line.strip()
            if line and not line.lower().startswith(("keywords:", "abstract:", "date of conference:", "track:")) and ("," in line or ";" in line):
                authors = line
                break

    keywords = _extract_by_label(soup, "Keywords:")
    track = _extract_by_label(soup, "Track:")

    pdf_url = _find_pdf_url(soup) or ""
    if pdf_url:
        # Fix: the href may be relative; resolve it against the page URL
        # so download_pdf always receives an absolute link.
        pdf_url = urljoin(url, pdf_url)

    # Year: prefer the /year/<yyyy>/ path segment, then the caller's hint.
    m = re.search(r"/year/(\d{4})/", url)
    year = int(m.group(1)) if m else (int(year_hint) if year_hint else 0)

    meta = PaperMeta(
        year=year,
        title=title,
        authors=authors,
        keywords=keywords,
        track=track,
        paper_url=url,
        pdf_url=pdf_url,
        pdf_path="",
    )
    time.sleep(delay)
    return meta


def download_pdf(session: requests.Session, url: str, dest_path: str) -> bool:
    """Stream *url* to *dest_path*, writing atomically via a .part file.

    Content-Type is deliberately not enforced: some servers label PDFs
    as octet-stream, and the original check was a no-op anyway (only the
    status code ever caused a rejection).

    Args:
        session: HTTP session (anything with a ``get(url, stream=True)``).
        url: absolute PDF URL; falsy means "nothing to download".
        dest_path: target file path; parent directories are created.

    Returns:
        True on success or when a non-empty file already exists at
        *dest_path*; False for a blank URL or a non-200 response.
    """
    if not url:
        return False
    parent = os.path.dirname(dest_path)
    if parent:  # dirname is "" for a bare filename; makedirs("") raises
        os.makedirs(parent, exist_ok=True)
    # Resume-friendly: skip when a previous run already saved this PDF.
    if os.path.exists(dest_path) and os.path.getsize(dest_path) > 0:
        return True
    with session.get(url, stream=True) as r:
        if r.status_code != 200:
            return False
        tmp_path = dest_path + ".part"
        with open(tmp_path, "wb") as f:
            for chunk in r.iter_content(chunk_size=1 << 14):
                if chunk:
                    f.write(chunk)
        # Atomic rename: readers never observe a partially written PDF.
        os.replace(tmp_path, dest_path)
    return True


def write_csv_header(path: str) -> None:
    """Create the metadata CSV with its header row if it doesn't exist.

    Existing files are left untouched so repeated runs append rows
    instead of clobbering earlier results. Parent directories are
    created as needed; a bare filename (empty dirname) is handled —
    the previous code crashed on ``os.makedirs("")``.
    """
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    if not os.path.exists(path):
        with open(path, "w", newline="", encoding="utf-8") as f:
            csv.writer(f).writerow(
                ["year", "title", "authors", "keywords", "track", "paper_url", "pdf_url", "pdf_path"]
            )


def append_csv_row(path: str, meta: PaperMeta) -> None:
    """Append one PaperMeta as a CSV row to the file at *path*."""
    row = [
        meta.year,
        meta.title,
        meta.authors,
        meta.keywords,
        meta.track,
        meta.paper_url,
        meta.pdf_url,
        meta.pdf_path,
    ]
    with open(path, "a", newline="", encoding="utf-8") as f:
        csv.writer(f).writerow(row)


def process_year(session: requests.Session, year: int, out_dir: str, max_workers: int, delay: float, dry_run: bool) -> Tuple[int, int]:
    """Scrape one conference year end-to-end.

    Collects paper links, fetches metadata sequentially (to stay polite
    to the server), then — unless *dry_run* — downloads PDFs with a
    thread pool and appends one CSV row per paper to
    <out_dir>/metadata_<year>.csv.

    Returns:
        (metadata rows collected, PDFs successfully downloaded).
    """
    links = collect_paper_links(session, year, delay=delay)
    if not links:
        print(f"[WARN] No paper links found for {year}")
        return 0, 0

    csv_path = os.path.join(out_dir, f"metadata_{year}.csv")
    write_csv_header(csv_path)

    metas: List[PaperMeta] = []
    # Fetch metadata sequentially to be polite
    for url in tqdm(links, desc=f"Fetch meta {year}"):
        try:
            meta = parse_paper_page(session, url, year_hint=year, delay=delay)
        except Exception as e:
            # Best-effort: a single bad page shouldn't abort the year.
            print(f"[ERROR] parse failed: {url}: {e}")
            continue
        metas.append(meta)

    # Download PDFs in parallel
    downloaded = 0
    if not dry_run:
        def _dl(meta: PaperMeta) -> Tuple[PaperMeta, bool]:
            # Worker: pick a filename, download, and record the local path.
            title_slug = slugify(meta.title)[:120]
            # Use filename from URL if available
            filename = os.path.basename(urlparse(meta.pdf_url).path) if meta.pdf_url else f"{title_slug}.pdf"
            if not filename.lower().endswith('.pdf'):
                filename = f"{title_slug or 'paper'}.pdf"
            pdf_path = os.path.join(out_dir, "papers", str(meta.year), filename)
            ok = download_pdf(session, meta.pdf_url, pdf_path) if meta.pdf_url else False
            # NOTE: mutates the shared meta; safe here because each meta is
            # handled by exactly one worker.
            meta.pdf_path = pdf_path if ok else ""
            return meta, ok

        # ex.map preserves input order, so CSV rows match the link order.
        with cf.ThreadPoolExecutor(max_workers=max_workers) as ex:
            for meta, ok in tqdm(ex.map(_dl, metas), total=len(metas), desc=f"Download PDFs {year}"):
                if ok:
                    downloaded += 1
                append_csv_row(csv_path, meta)
    else:
        # Dry-run: don't download, just write CSV with pdf_url
        for meta in metas:
            append_csv_row(csv_path, meta)

    return len(metas), downloaded


def main(argv: Optional[List[str]] = None) -> int:
    """CLI entry point: parse arguments and scrape each requested year.

    A failure in one year is logged and does not stop the remaining
    years. Always returns exit code 0.
    """
    parser = argparse.ArgumentParser(description="AMOS Technical Papers bulk downloader and metadata extractor")
    parser.add_argument("--years", default="2025", help="Years to scrape. Examples: '2025' or '2023-2025' or '2019,2022,2025'")
    parser.add_argument("--out", default="data", help="Output directory (default: data)")
    parser.add_argument("--max-workers", type=int, default=6, help="Max concurrent downloads (default: 6)")
    parser.add_argument("--delay", type=float, default=0.5, help="Polite delay (seconds) between page requests (default: 0.5)")
    parser.add_argument("--dry-run", action="store_true", help="Do not download PDFs, only collect metadata")
    opts = parser.parse_args(argv)

    session = get_session()
    total_meta, total_pdf = 0, 0
    for year in parse_years_arg(opts.years):
        try:
            n_meta, n_pdf = process_year(session, year, opts.out, opts.max_workers, opts.delay, opts.dry_run)
        except Exception as e:
            print(f"[ERROR] Year {year} failed: {e}")
            continue
        total_meta += n_meta
        total_pdf += n_pdf

    print(f"Done. Metadata rows: {total_meta}, PDFs downloaded: {total_pdf}")
    return 0


if __name__ == "__main__":
    # SystemExit propagates main()'s return value as the process exit code.
    raise SystemExit(main())
