"""Step 4: Download SEC filings (PDF + Markdown) and XBRL company facts.
Uses:
- SecEdgarDownloader from projects.tools.sec_edgar.downloader
- playwright.html_to_pdf from projects.tools.utils.playwright
- html2text (project dependency) for HTML -> Markdown conversion
- SEC XBRL CompanyFacts API for structured financial data
Pipeline per ticker:
1. Resolve CIK (direct ticker lookup, then fuzzy match by company name)
2. Download HTML filings to a temp dir via SecEdgarDownloader.download()
3. Convert HTML -> PDF via playwright.html_to_pdf() (human-readable)
4. Convert HTML -> Markdown via html2text (LLM-friendly)
5. Download XBRL company facts JSON from SEC CompanyFacts API
CIK resolution strategy (every US-listed company MUST have a CIK):
1. Direct ticker → CIK lookup via SEC company_tickers.json
2. If that fails, fuzzy match by company name via SEC company_tickers_exchange.json
3. Only if both fail is the ticker skipped (logged as warning)
Tickers are processed in parallel, bounded by an asyncio.Semaphore.
Output:
data/filings/{TICKER}/*.pdf -- human-readable PDFs
data/filings/{TICKER}/*.md -- LLM-friendly Markdown
data/xbrl/raw/{TICKER}.json -- structured XBRL facts
data/xbrl/cik_map.json -- ticker → CIK mapping
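
Example (hypothetical invocation; the package path below is an assumption,
and SEC_EDGAR_USER_AGENT must be exported first):
    >>> from pipeline import collect_filings   # adjust to your layout
    >>> collect_filings.run(["AAPL", "MSFT"])  # e.g. {"AAPL": 12, "MSFT": 9}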
"""
from __future__ import annotations
import asyncio
import json
import logging
import os
import tempfile
from pathlib import Path
import html2text
import httpx
import pandas as pd
from bs4 import BeautifulSoup
from projects.tools.sec_edgar.downloader import SecEdgarDownloader
from projects.tools.utils.playwright import html_to_pdf
from . import config
logger = logging.getLogger(__name__)
# Shared html2text converter configuration
_h2t = html2text.HTML2Text()
_h2t.ignore_links = False
_h2t.ignore_images = True
_h2t.body_width = 0 # no line wrapping -- let the consumer handle it
_h2t.protect_links = True
_h2t.wrap_links = False
# Inline XBRL tag names to strip (they contain machine-readable noise)
_XBRL_STRIP_TAGS = ["ix:header", "ix:hidden"]
def _html_to_markdown(html_path: Path, md_path: Path) -> None:
    """Convert an SEC filing's HTML file directly to Markdown using html2text.
Strips inline XBRL metadata (ix:header, ix:hidden) before conversion
so the resulting Markdown contains only the human-readable filing text.
"""
html_content = html_path.read_text(encoding="utf-8", errors="replace")
soup = BeautifulSoup(html_content, "html.parser")
for tag_name in _XBRL_STRIP_TAGS:
for tag in soup.find_all(tag_name):
tag.decompose()
markdown_text: str = _h2t.handle(str(soup))
_ = md_path.write_text(markdown_text, encoding="utf-8")
# ---------------------------------------------------------------------------
# XBRL Company Facts helpers
# ---------------------------------------------------------------------------
_XBRL_RAW_DIR = config.XBRL_DIR / "raw"
def _atomic_json_write(data: dict, dest: Path) -> None:
"""Write JSON atomically (tempfile + os.replace)."""
dest.parent.mkdir(parents=True, exist_ok=True)
fd, tmp = tempfile.mkstemp(suffix=".json", dir=dest.parent)
try:
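        # mkstemp hands back a raw fd; close it and reopen the path in text mode.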
os.close(fd)
with open(tmp, "w", encoding="utf-8") as fh:
json.dump(data, fh, ensure_ascii=False)
os.replace(tmp, dest)
except BaseException:
try:
os.unlink(tmp)
except OSError:
pass
raise
async def _resolve_cik_map(
client: httpx.AsyncClient,
tickers: list[str],
) -> dict[str, str]:
    """Build a ticker → zero-padded CIK mapping (one API call, reused for the whole run)."""
url = "https://www.sec.gov/files/company_tickers.json"
resp = await client.get(url)
resp.raise_for_status()
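    # Courtesy pause: SEC fair-access guidance caps clients at 10 requests/second.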
await asyncio.sleep(0.1)
raw: dict = resp.json()
sec_map: dict[str, str] = {}
for entry in raw.values():
t = str(entry.get("ticker", "")).upper()
cik = str(entry.get("cik_str", ""))
if t and cik:
sec_map[t] = cik.zfill(10)
result: dict[str, str] = {}
missing: list[str] = []
for ticker in tickers:
cik = sec_map.get(ticker.upper())
if cik:
result[ticker] = cik
else:
missing.append(ticker)
if missing:
logger.info(
"CIK map: %d resolved, %d missing (will try fuzzy match during filing download)",
len(result), len(missing),
)
return result
async def _download_xbrl_facts(
client: httpx.AsyncClient,
ticker: str,
cik: str,
) -> bool:
"""Download one company's XBRL facts JSON. Returns True on success."""
dest = _XBRL_RAW_DIR / f"{ticker}.json"
if dest.exists() and dest.stat().st_size > 100:
return True # already collected
url = config.XBRL_COMPANY_FACTS_URL.format(cik=cik)
for attempt in range(3):
try:
resp = await client.get(url)
if resp.status_code == 404:
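                # The CompanyFacts API 404s when a CIK has no XBRL data; write a
                # small sentinel (under the 100-byte threshold above, so it gets
                # re-checked on later runs) instead of treating it as a failure.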
_atomic_json_write(
{"_no_xbrl": True, "cik": cik, "ticker": ticker}, dest,
)
return True
resp.raise_for_status()
_atomic_json_write(resp.json(), dest)
return True
except httpx.HTTPStatusError as exc:
if exc.response.status_code == 429:
await asyncio.sleep(2 ** (attempt + 1))
elif exc.response.status_code >= 500:
await asyncio.sleep(2 ** attempt)
else:
logger.warning("XBRL HTTP %d for %s", exc.response.status_code, ticker)
return False
except (httpx.ConnectError, httpx.ReadTimeout):
await asyncio.sleep(2 ** attempt)
logger.warning("XBRL download failed for %s after 3 attempts", ticker)
return False
# ---------------------------------------------------------------------------
# Filing download helpers
# ---------------------------------------------------------------------------
_MAX_RETRIES = 3
async def _retry_async(coro_factory, description: str, retries: int = _MAX_RETRIES):
"""Call *coro_factory()* up to *retries* times with exponential backoff.
ValueError is never retried (it signals a deterministic failure like
missing CIK, not a transient network issue).
"""
for attempt in range(retries):
try:
return await coro_factory()
except ValueError:
raise # deterministic – retrying won't help
except Exception as exc:
if attempt < retries - 1:
                wait = 2 ** attempt * 3  # 3s, then 6s (the final attempt re-raises)
logger.warning("%s failed (attempt %d/%d), retrying in %ds: %s",
description, attempt + 1, retries, wait, exc)
await asyncio.sleep(wait)
else:
raise
async def _download_with_fallback(
ticker: str,
company_name: str,
downloader: SecEdgarDownloader,
output_dir: Path,
) -> list[Path]:
"""Download filings, falling back to CIK-by-company-name if ticker lookup fails.
Every US-listed company has a CIK. The direct ticker→CIK map sometimes
misses tickers (recent renames, class shares, etc.), so we fall back to
fuzzy-matching the company name against the SEC title database.
"""
try:
return await downloader.download(
ticker=ticker,
filing_types=config.SEC_FILING_TYPES, # type: ignore[arg-type]
from_year=config.START_YEAR,
to_year=config.END_YEAR,
output_dir=output_dir,
)
except ValueError:
pass # ticker not in CIK map – try fallback
if not company_name:
raise ValueError(f"Ticker {ticker} not in SEC CIK map and no company name for fallback.")
logger.info("%s: ticker lookup failed, trying fuzzy match for '%s' ...", ticker, company_name)
matches = await downloader.score_title_fuzzy_match(company_name)
if not matches:
raise ValueError(f"Ticker {ticker}: no fuzzy matches for '{company_name}'.")
best = matches[0]
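    # Accept only a reasonably confident match; below 60/100 we refuse to guess.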
if best.score < 60:
raise ValueError(
f"Ticker {ticker}: best fuzzy match '{best.title}' (CIK={best.cik}) "
f"scored only {best.score:.0f} – too low to trust."
)
logger.info("%s: fuzzy matched → '%s' (CIK=%s, score=%.0f)", ticker, best.title, best.cik, best.score)
return await downloader.download(
cik=best.cik,
filing_types=config.SEC_FILING_TYPES, # type: ignore[arg-type]
from_year=config.START_YEAR,
to_year=config.END_YEAR,
output_dir=output_dir,
)
async def _download_ticker_filings(
ticker: str,
company_name: str,
downloader: SecEdgarDownloader,
semaphore: asyncio.Semaphore,
xbrl_client: httpx.AsyncClient | None = None,
cik: str | None = None,
) -> int:
"""Download filings + XBRL for a single ticker. Returns number of filings processed."""
ticker_dir = config.FILINGS_DIR / ticker
done_flag = ticker_dir / ".done"
filings_done = done_flag.exists()
xbrl_path = _XBRL_RAW_DIR / f"{ticker}.json"
xbrl_done = xbrl_path.exists() and xbrl_path.stat().st_size > 100
if filings_done and xbrl_done:
return 0 # everything already done
async with semaphore:
filing_count = 0
# ── Filing documents (PDF + Markdown) ─────────────────────────
if not filings_done:
logger.info("Downloading filings for %s ...", ticker)
ticker_dir.mkdir(parents=True, exist_ok=True)
all_conversions_ok = True
try:
with tempfile.TemporaryDirectory(prefix="whatif_sec_") as tmpdir:
tmpdir_path = Path(tmpdir)
html_paths = await _retry_async(
lambda: _download_with_fallback(
ticker, company_name, downloader, tmpdir_path,
),
description=f"SEC download {ticker}",
)
for htm_path in html_paths:
base_name = htm_path.parent.name
pdf_path = ticker_dir / (base_name + ".pdf")
md_path = ticker_dir / (base_name + ".md")
# HTML -> PDF (human-readable; skip if already exists)
if not pdf_path.exists():
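                            # Default args freeze the current loop values so the
                            # retry lambda does not close over mutating variables.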
try:
await _retry_async(
lambda _h=htm_path, _p=pdf_path: html_to_pdf(_h, _p),
description=f"PDF {ticker}/{htm_path.name}",
)
except Exception as exc:
logger.warning("PDF conversion failed for %s/%s after retries: %s",
ticker, htm_path.name, exc)
all_conversions_ok = False
# HTML -> Markdown (LLM-friendly; skip if already exists)
if not md_path.exists():
try:
_html_to_markdown(htm_path, md_path)
except Exception as exc:
logger.warning("Markdown conversion failed for %s/%s: %s",
ticker, htm_path.name, exc)
all_conversions_ok = False
filing_count += 1
# Only mark as done if ALL conversions succeeded
if all_conversions_ok:
_ = done_flag.write_text(f"filings={filing_count}")
logger.info("%s: %d filings processed (PDF + MD).%s",
ticker, filing_count,
"" if all_conversions_ok else " (some conversions failed, will retry)")
except ValueError as exc:
logger.warning("Ticker %s: CIK resolution failed after all strategies: %s", ticker, exc)
except Exception as exc:
logger.warning("Filing download failed for %s: %s", ticker, exc)
# ── XBRL company facts ────────────────────────────────────────
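        # Uses the direct ticker → CIK map only; a CIK recovered via fuzzy
        # matching above is not propagated to the XBRL download.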
if not xbrl_done and xbrl_client is not None and cik:
try:
await _download_xbrl_facts(xbrl_client, ticker, cik)
except Exception as exc:
logger.warning("XBRL download failed for %s: %s", ticker, exc)
return filing_count
def _load_company_names() -> dict[str, str]:
"""Load ticker → company name mapping from the universe CSV."""
univ_path = config.UNIVERSE_DIR / "benchmark_universe.csv"
if not univ_path.exists():
return {}
df = pd.read_csv(univ_path)
if "ticker" in df.columns and "name" in df.columns:
return dict(zip(df["ticker"], df["name"].fillna("")))
return {}
async def run_async(tickers: list[str] | None = None) -> dict[str, int]:
"""Execute Step 4 (async): download filings + XBRL facts.
Returns ``{ticker: filing_count}``.
"""
config.FILINGS_DIR.mkdir(parents=True, exist_ok=True)
_XBRL_RAW_DIR.mkdir(parents=True, exist_ok=True)
user_agent = os.getenv("SEC_EDGAR_USER_AGENT")
if not user_agent:
raise ValueError("Set SEC_EDGAR_USER_AGENT environment variable.")
if tickers is None:
universe_path = config.UNIVERSE_DIR / "benchmark_universe.csv"
if not universe_path.exists():
raise FileNotFoundError(f"Run Step 1 first: {universe_path}")
tickers = pd.read_csv(universe_path)["ticker"].tolist()
# Load company names for CIK fuzzy-match fallback
company_names = _load_company_names()
# Resolve CIK map for XBRL (single API call, reused across all tickers)
headers = {"User-Agent": user_agent, "Accept-Encoding": "gzip, deflate"}
async with httpx.AsyncClient(
headers=headers, timeout=30.0, follow_redirects=True,
) as xbrl_client:
cik_map = await _resolve_cik_map(xbrl_client, tickers)
# Persist CIK map for reference
cik_map_path = config.XBRL_DIR / "cik_map.json"
config.XBRL_DIR.mkdir(parents=True, exist_ok=True)
_atomic_json_write(cik_map, cik_map_path)
logger.info(
"Downloading SEC filings + XBRL for %d tickers "
"(%d with company names, %d with CIK) ...",
len(tickers), len(company_names), len(cik_map),
)
downloader = SecEdgarDownloader(user_agent=user_agent)
semaphore = asyncio.Semaphore(config.SEC_FILING_WORKERS)
tasks = [
_download_ticker_filings(
t,
company_names.get(t, ""),
downloader,
semaphore,
xbrl_client=xbrl_client,
cik=cik_map.get(t),
)
for t in tickers
]
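        # return_exceptions=True keeps one failed ticker from cancelling the rest.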
results = await asyncio.gather(*tasks, return_exceptions=True)
summary: dict[str, int] = {}
for ticker, result in zip(tickers, results):
if isinstance(result, BaseException):
logger.warning("Ticker %s raised: %s", ticker, result)
summary[ticker] = 0
else:
summary[ticker] = result
total = sum(summary.values())
xbrl_count = sum(1 for f in _XBRL_RAW_DIR.glob("*.json") if f.stat().st_size > 100)
logger.info(
"SEC Step 4 complete: %d filings across %d tickers, %d XBRL facts downloaded.",
total, len(tickers), xbrl_count,
)
return summary
def run(tickers: list[str] | None = None) -> dict[str, int]:
"""Sync wrapper around the async implementation."""
return asyncio.run(run_async(tickers))
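

if __name__ == "__main__":
    # Minimal manual entry point (a sketch; the real pipeline runner may
    # invoke run() differently): enable INFO logging, process the full universe.
    logging.basicConfig(level=logging.INFO)
    run()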