"""
01_scrape.py — Fetch the RocketReviews.com plans index and scrape each
detail page, saving structured JSON to source/plans/.

Output
------
source/plans/index.jsonl             one record per plan (raw index fields)
source/plans/detail/{s}/{slug}.json  full parsed detail per plan, sharded by
                                     the first letter of the slug

Usage
-----
python scripts/plans/01_scrape.py
python scripts/plans/01_scrape.py --delay 1.0 --limit 10
python scripts/plans/01_scrape.py --force    # re-scrape existing files
"""

from __future__ import annotations

import argparse
import json
import logging
import re
import sys
import time
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional

import requests
from bs4 import BeautifulSoup
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

BASE_URL = "https://www.rocketreviews.com"
INDEX_URL = f"{BASE_URL}/rocketry-plans.html"
USER_AGENT = "RocketReviews-Dataset/1.0"
DEFAULT_DELAY = 1.0

# This script lives at scripts/plans/01_scrape.py, so the project root is
# three levels up.
ROOT = Path(__file__).parent.parent.parent
SOURCE_DIR = ROOT / "source" / "plans"
DETAIL_DIR = SOURCE_DIR / "detail"

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(message)s",
    handlers=[logging.StreamHandler(sys.stdout)],
)
log = logging.getLogger(__name__)


def _build_session() -> requests.Session:
    """Return a Session with a custom User-Agent and retry-enabled adapters."""
    s = requests.Session()
    s.headers["User-Agent"] = USER_AGENT
    retry = Retry(
        total=3,
        backoff_factor=2.0,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["GET"],
    )
    s.mount("https://", HTTPAdapter(max_retries=retry))
    s.mount("http://", HTTPAdapter(max_retries=retry))
    return s
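

# With backoff_factor=2.0 urllib3 waits an exponentially growing interval
# between retries of the listed 429/5xx statuses (roughly 2s, 4s, 8s; the
# exact schedule for the first retry differs between urllib3 1.x and 2.x).
# `allowed_methods` requires urllib3 >= 1.26.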


class RateLimiter:
    """Enforce a minimum delay between consecutive requests."""

    def __init__(self, delay: float) -> None:
        self.delay = delay
        self._last: float = 0.0

    def wait(self) -> None:
        """Sleep so that at least `delay` seconds separate successive calls."""
        elapsed = time.monotonic() - self._last
        if elapsed < self.delay:
            time.sleep(self.delay - elapsed)
        self._last = time.monotonic()
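

# A single RateLimiter is shared by every request in this script, so the
# delay applies globally rather than per host. The first wait() is
# effectively a no-op because `_last` starts at 0.0.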


def _slug_from_path(path: str) -> str:
    """
    Extract the slug from a URL path.

    e.g. '/1940-exploratory-planetary-cargo-ferry-180703114912.html' ->
         '1940-exploratory-planetary-cargo-ferry'
    """
    name = path.lstrip("/").removesuffix(".html")
    # Strip the trailing run of 10+ digits (apparently an upload timestamp)
    # that the site appends to each plan's filename.
    name = re.sub(r"-\d{10,}$", "", name)
    return name


def _parse_index(html: str) -> list[dict]:
    """
    Parse the rocketry-plans.html static page and return one record
    per plan from the main data table.
    """
    soup = BeautifulSoup(html, "lxml")
    records = []

    # Assume the plans listing is the last <table> on the page; earlier
    # tables are page layout.
    tables = soup.find_all("table")
    table = tables[-1] if tables else None
    if not table:
        log.warning("Could not find the plans table on the index page.")
        return records

    # Skip the header row. Expected cell layout: cells[1] = source,
    # cells[2] = linked title, cells[3] = style, cells[4] = hosting site.
    for row in table.find_all("tr")[1:]:
        cells = row.find_all(["td", "th"])
        if len(cells) < 5:
            continue

        source = cells[1].get_text(strip=True) or None

        title_a = cells[2].find("a")
        title_trunc = title_a.get_text(strip=True) if title_a else cells[2].get_text(strip=True)
        detail_path = title_a["href"] if title_a and title_a.has_attr("href") else None

        # Without a detail link the plan cannot be scraped; drop the row.
        if not detail_path:
            continue

        detail_url = detail_path if detail_path.startswith("http") else f"{BASE_URL}{detail_path}"
        slug = _slug_from_path(detail_path)

        style = cells[3].get_text(strip=True) or None

        site_a = cells[4].find("a")
        site_name = site_a.get_text(strip=True) if site_a else cells[4].get_text(strip=True)
        external_url = site_a["href"] if site_a and site_a.has_attr("href") else None
        if not site_name:
            site_name = None

        records.append({
            "slug": slug,
            "title_truncated": title_trunc,
            "source": source,
            "style": style,
            "site": {
                "name": site_name,
                "url": external_url,
            },
            "url": detail_url,
        })

    return records
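

# Each index record has the shape:
#   {"slug": ..., "title_truncated": ..., "source": ..., "style": ...,
#    "site": {"name": ..., "url": ...}, "url": ...}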


def _parse_detail(html: str, index_rec: dict) -> dict:
    """Merge index-level fields with the full title from the detail page."""
    soup = BeautifulSoup(html, "lxml")

    # Detail pages title themselves "Rocketry Plans/Instructions - <title>";
    # strip that prefix to recover the full, untruncated title.
    h1 = soup.find("h1")
    full_title = None
    if h1:
        raw_h1 = h1.get_text(strip=True)
        full_title = re.sub(r"^Rocketry Plans/Instructions\s*-\s*", "", raw_h1, flags=re.I).strip()

    # Fall back to the (possibly truncated) index title if the h1 is missing
    # or empty.
    if not full_title:
        full_title = index_rec.get("title_truncated")

    out_rec = {k: v for k, v in index_rec.items() if k != "title_truncated"}
    return {
        **out_rec,
        "title": full_title,
        "scraped_at": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
    }
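

# The merged detail record drops "title_truncated" in favor of the full
# "title" and adds a second-resolution UTC "scraped_at" stamp of the form
# "YYYY-MM-DDTHH:MM:SSZ".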


def fetch_index(session: requests.Session) -> list[dict]:
    """Download the plans index page and parse it into records."""
    log.info("Fetching plans index from %s", INDEX_URL)
    resp = session.get(INDEX_URL, timeout=30)
    resp.raise_for_status()
    records = _parse_index(resp.text)
    log.info("Index returned %d records.", len(records))
    return records
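

# With the Retry mounted in _build_session(), a page that keeps returning
# 429/5xx surfaces as requests.exceptions.RetryError (a RequestException
# subclass) once the retry budget is exhausted, rather than reaching
# raise_for_status(); raise_for_status() still catches non-retried statuses
# such as 404.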


def scrape_detail(
    session: requests.Session,
    rate: RateLimiter,
    index_rec: dict,
    force: bool = False,
) -> Optional[dict]:
    """
    Fetch and parse one plan detail page.

    Returns None both when the detail file already exists (and force is off)
    and when the fetch fails; failures are logged here, and the caller
    tallies either case as skipped.
    """
    slug = index_rec["slug"]
    # Details are sharded into detail/<first letter>/<slug>.json; an empty
    # slug lands in detail/_/.
    shard = slug[0].lower() if slug else "_"
    shard_dir = DETAIL_DIR / shard
    dest = shard_dir / f"{slug}.json"

    if dest.exists() and not force:
        log.debug("Already scraped %s, skipping.", slug)
        return None

    url = index_rec["url"]
    rate.wait()

    try:
        resp = session.get(url, timeout=30)
        resp.raise_for_status()
    except requests.RequestException as exc:
        log.warning("Failed to fetch plan %s: %s", slug, exc)
        return None

    return _parse_detail(resp.text, index_rec)
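

# Note that scrape_detail() returns the parsed record rather than writing
# it; main() performs the write, keeping the ok/skipped/failed accounting
# in one place.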


def main() -> None:
    parser = argparse.ArgumentParser(description="Scrape RocketReviews.com plans.")
    parser.add_argument(
        "--delay",
        type=float,
        default=DEFAULT_DELAY,
        help=f"Seconds between requests (default: {DEFAULT_DELAY})",
    )
    parser.add_argument(
        "--limit",
        type=int,
        default=None,
        help="Stop after scraping this many detail pages (useful for testing)",
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help="Re-scrape plans that already have a saved detail file",
    )
    args = parser.parse_args()

    SOURCE_DIR.mkdir(parents=True, exist_ok=True)
    DETAIL_DIR.mkdir(parents=True, exist_ok=True)

    session = _build_session()
    rate = RateLimiter(args.delay)

    records = fetch_index(session)

    # Write the raw index as JSONL, renaming title_truncated to title so the
    # index and detail files expose the same field name.
    scraped_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    index_path = SOURCE_DIR / "index.jsonl"
    with index_path.open("w", encoding="utf-8") as f:
        for rec in records:
            out_rec = {k: v for k, v in rec.items() if k != "title_truncated"}
            out_rec["title"] = rec["title_truncated"]
            f.write(json.dumps({**out_rec, "scraped_at": scraped_at}) + "\n")
    log.info("Wrote %d index records to %s", len(records), index_path)
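
    # Each line of index.jsonl is one compact JSON object carrying the raw
    # index fields; its "title" comes from the listing page and may be
    # truncated, unlike the "title" in the per-plan detail files.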

    # --limit applies only to the detail scrape; the full index has already
    # been written above.
    if args.limit:
        records = records[: args.limit]

    ok = skipped = failed = 0
    total = len(records)

    for i, rec in enumerate(records, 1):
        result = scrape_detail(session, rate, rec, force=args.force)

        if result is None:
            # Either the file already exists or the fetch failed; see
            # scrape_detail's docstring.
            skipped += 1
        else:
            slug = rec["slug"]
            shard = slug[0].lower() if slug else "_"
            shard_dir = DETAIL_DIR / shard
            shard_dir.mkdir(parents=True, exist_ok=True)
            dest = shard_dir / f"{slug}.json"
            try:
                dest.write_text(
                    json.dumps(result, indent=2, ensure_ascii=False), encoding="utf-8"
                )
                ok += 1
                log.debug("Saved %s", dest.name)
            except OSError as exc:
                log.warning("Could not write %s: %s", dest, exc)
                failed += 1

        # Report progress every 25 records and on the final one, counting
        # skipped records as well.
        if i % 25 == 0 or i == total:
            log.info(
                "Progress: %d/%d — ok=%d skipped=%d failed=%d",
                i,
                total,
                ok,
                skipped,
                failed,
            )

    log.info("Done — ok=%d skipped=%d failed=%d", ok, skipped, failed)


if __name__ == "__main__":
    main()