| """Layer 2: Preprocess raw data into a task-agnostic panel. |
| |
| A **pure function** of (raw data files + config). No API calls, no side |
| effects. Given the same raw data and config the output is deterministic. |
| |
| Takes ``config.GRANULARITY`` (``"daily"``, ``"weekly"``, ``"monthly"``) and |
| produces: |
| |
| data/processed/{granularity}/panel.parquet -- merged panel |
| data/processed/{granularity}/columns.json -- column-name groups |
| |
| Steps: |
| 2a. Load raw data (no transformations) |
| 2b. Resample to target granularity |
| 2c. Merge into panel |
| 2d. Derive time-varying metrics |
| 2e. Save |
| """ |
|
|
| from __future__ import annotations |
|
|
| import json |
| import logging |
| import re |
| from pathlib import Path |
|
|
| import numpy as np |
| import pandas as pd |
|
|
| from . import config |
|
|
| logger = logging.getLogger(__name__) |
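|
| # Illustrative usage sketch (hedged: the import path below is an assumption, |
| # not part of this file; this module lives in a package, so import it there): |
| # |
| #     from mypkg import layer2_preprocess as pp   # hypothetical module name |
| #     panel = pp.run("weekly")   # -> data/processed/weekly/panel.parquet |
| #     panel[["ticker", "date", "close"]].head() |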
|
|
|
|
| # --------------------------------------------------------------------------- |
| # 2a. Load raw data |
| # --------------------------------------------------------------------------- |
|
|
| def _load_prices() -> pd.DataFrame: |
| """Load raw daily prices and filter out rows with NaN close.""" |
| path = config.PRICES_DIR / "daily_prices.csv" |
| if not path.exists(): |
| raise FileNotFoundError(f"Run Step 3 first: {path}") |
| df = pd.read_csv(path, parse_dates=["Date"]) |
| df = df.rename(columns={ |
| "Date": "date", "Ticker": "ticker", |
| "Open": "open", "High": "high", "Low": "low", |
| "Close": "close", "Volume": "volume", "Adj Close": "adj_close", |
| }) |
| |
| keep = ["ticker", "date", "open", "high", "low", "close", "volume", "adj_close"] |
| df = df[[c for c in keep if c in df.columns]] |
| |
| |
| |
| df["date"] = pd.to_datetime(df["date"], errors="coerce") |
| |
| |
| |
| |
| if "adj_close" in df.columns: |
| bad_adj = df["adj_close"] < 0 |
| if bad_adj.any(): |
| n = int(bad_adj.sum()) |
| df.loc[bad_adj, "adj_close"] = df.loc[bad_adj, "close"] |
| logger.info("Recovery: replaced %d negative adj_close rows with close value", n) |
| |
| |
| |
| |
| if all(c in df.columns for c in ("open", "high", "low", "close")): |
| prev_bad = ((df["high"] < df[["open", "low", "close"]].max(axis=1)) | |
| (df["low"] > df[["open", "high", "close"]].min(axis=1))).sum() |
| if prev_bad: |
| ohlc = df[["open", "high", "low", "close"]].to_numpy() |
| df["high"] = ohlc.max(axis=1) |
| df["low"] = ohlc.min(axis=1) |
| logger.info("OHLC sanity: enforced high=max(O,H,L,C) / low=min(O,H,L,C) on %d rows", int(prev_bad)) |
| |
| before = len(df) |
| df = df.dropna(subset=["close"]) |
| dropped = before - len(df) |
| if dropped > 0: |
| logger.info("Dropped %d rows with NaN close in price data.", dropped) |
| return df.sort_values(["ticker", "date"]).reset_index(drop=True) |
|
|
|
|
| def _load_statement_long(ticker: str) -> pd.DataFrame: |
| """Load per-ticker statement CSVs into long-form (date, metric, value). |
| |
| yfinance statement CSVs: index = metric names, columns = date strings. |
| """ |
| records: list[dict] = [] |
| for suffix, key_map in [ |
| ("income", config.INCOME_KEYS), |
| ("balance", config.BALANCE_KEYS), |
| ("cashflow", config.CASHFLOW_KEYS), |
| ]: |
| csv_path = config.FUNDAMENTALS_DIR / f"{ticker}_{suffix}.csv" |
| if not csv_path.exists(): |
| continue |
| try: |
| raw = pd.read_csv(csv_path, index_col=0) |
| for orig_name, col_name in key_map.items(): |
| if orig_name in raw.index: |
| row = raw.loc[orig_name] |
| for date_str, val in row.items(): |
| try: |
| records.append({ |
| "date": pd.to_datetime(date_str), |
| "metric": col_name, |
| "value": pd.to_numeric(val, errors="coerce"), |
| }) |
| except Exception: |
| continue |
| except Exception as exc: |
| logger.debug("Could not load %s for %s: %s", suffix, ticker, exc) |
|
|
| if not records: |
| return pd.DataFrame() |
|
|
| long_df = pd.DataFrame(records) |
| |
| wide = long_df.pivot_table(index="date", columns="metric", values="value", aggfunc="first") |
| wide = wide.reset_index().sort_values("date") |
| wide.columns.name = None |
|
|
| # Flow (income / cash-flow) metrics get trailing-twelve-month (TTM) sums; |
| # balance-sheet levels are snapshots and do not. |
| _FLOW_METRICS = { |
| "stmt_revenue", "stmt_net_income", "stmt_ebitda", "stmt_ebit", |
| "stmt_gross_profit", "stmt_operating_income", "stmt_basic_eps", |
| "stmt_operating_cashflow", "stmt_free_cashflow", "stmt_capex", |
| "stmt_cogs", "stmt_operating_expenses", "stmt_financing_cashflow", |
| } |
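|
| # Worked sketch (illustrative numbers): quarterly revenue [10, 12, 11, 13, 14] |
| # -> rolling(window=4, min_periods=4).sum() -> [NaN, NaN, NaN, 46, 50]; the |
| # first TTM value appears only once four quarters are available. |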
| for col in list(wide.columns): |
| if col in _FLOW_METRICS: |
| ttm_col = f"{col}_ttm" |
| wide[ttm_col] = wide[col].rolling(window=4, min_periods=4).sum() |
|
|
| wide["ticker"] = ticker |
| return wide |
|
|
|
|
| def _load_xbrl_statements(tickers: list[str]) -> pd.DataFrame: |
| """Load historical financial statements from SEC EDGAR XBRL facts. |
| |
| Reads ``data/xbrl/parsed/company_facts.parquet`` and pivots the |
| relevant tags into the same (ticker, date, stmt_*) wide format that |
| ``_load_statement_long`` produces. This gives us quarterly data |
| going back 10+ years — far beyond yfinance's ~5-quarter window. |
| |
| Returns an empty DataFrame if the XBRL data is unavailable. |
| """ |
| xbrl_path = config.DATA_DIR / "xbrl" / "parsed" / "company_facts.parquet" |
| if not xbrl_path.exists(): |
| logger.warning("XBRL facts not found at %s — skipping.", xbrl_path) |
| return pd.DataFrame() |
|
|
| |
| wanted_tags: set[str] = set() |
| for tags in config.XBRL_TAG_MAP.values(): |
| wanted_tags.update(tags) |
| wanted_tags.update(config.XBRL_DA_TAGS) |
| |
| wanted_tags.add("LiabilitiesAndStockholdersEquity") |
|
|
| facts = pd.read_parquet( |
| xbrl_path, |
| columns=["ticker", "tag", "period_start", "period_end", "value", |
| "form", "fiscal_year", "fiscal_period", "filed"], |
| ) |
|
|
| |
| facts = facts[ |
| facts["form"].isin(["10-K", "10-Q", "10-K/A", "10-Q/A", "20-F", "20-F/A", "6-K", "40-F", "40-F/A"]) |
| & facts["ticker"].isin(tickers) |
| & facts["tag"].isin(wanted_tags) |
| ].copy() |
|
|
| if facts.empty: |
| logger.warning("No matching XBRL facts after filtering.") |
| return pd.DataFrame() |
|
|
| facts["period_start"] = pd.to_datetime(facts["period_start"], errors="coerce") |
| facts["period_end"] = pd.to_datetime(facts["period_end"], errors="coerce") |
| facts["filed"] = pd.to_datetime(facts["filed"], errors="coerce") |
| facts["value"] = pd.to_numeric(facts["value"], errors="coerce") |
| facts = facts.dropna(subset=["period_end", "value"]) |
|
|
| # Split facts into balance-sheet levels (point-in-time snapshots) and flow |
| # metrics (per-period amounts). Flows need standalone quarterly rows plus a |
| # derived Q4; balance levels can use FY rows directly. |
| _BALANCE_SHEET_TAGS = { |
| "Assets", "Liabilities", "LiabilitiesAndStockholdersEquity", |
| "StockholdersEquity", "StockholdersEquityIncludingPortionAttributableToNoncontrollingInterest", |
| "LongTermDebt", "LongTermDebtAndCapitalLeaseObligations", |
| "ShortTermBorrowings", "DebtCurrent", "LongTermDebtNoncurrent", |
| "CashAndCashEquivalentsAtCarryingValue", |
| "CashCashEquivalentsRestrictedCashAndRestrictedCashEquivalents", |
| "EntityCommonStockSharesOutstanding", "CommonStockSharesOutstanding", |
| "WeightedAverageNumberOfShareOutstandingBasicAndDiluted", |
| "WeightedAverageNumberOfSharesOutstandingBasic", |
| "WeightedAverageNumberOfDilutedSharesOutstanding", |
| "CommonSharesIssued", "CommonSharesOutstanding", |
| |
| "AccountsReceivableNetCurrent", "AccountsReceivableNet", |
| "TradeAndOtherCurrentReceivables", |
| "InventoryNet", "Inventories", "CurrentInventories", |
| "AssetsCurrent", "CurrentAssets", |
| "PropertyPlantAndEquipmentNet", "PropertyPlantAndEquipment", |
| "Goodwill", "GoodwillGross", |
| "AccountsPayableCurrent", "AccountsPayable", |
| "TradeAndOtherCurrentPayables", |
| "LiabilitiesCurrent", "CurrentLiabilities", |
| } |
|
|
| |
| is_balance = facts["tag"].isin(_BALANCE_SHEET_TAGS) |
| balance_facts = facts[is_balance].copy() |
| flow_facts = facts[~is_balance].copy() |
|
|
| # Keep standalone quarters only: the ~100-day duration cap filters out the |
| # cumulative year-to-date values some filers tag as Q2/Q3. |
| quarterly_flow = flow_facts[flow_facts["fiscal_period"].isin(["Q1", "Q2", "Q3", "Q4"])].copy() |
| if quarterly_flow["period_start"].notna().any(): |
| duration = (quarterly_flow["period_end"] - quarterly_flow["period_start"]).dt.days |
| quarterly_flow = quarterly_flow[duration.isna() | (duration <= 100)] |
|
|
| # Derive standalone Q4 flows as FY - (Q1 + Q2 + Q3): many filers never report |
| # Q4 separately, only the full year in the 10-K. |
| fy_flow = flow_facts[flow_facts["fiscal_period"] == "FY"].copy() |
| if not fy_flow.empty and not quarterly_flow.empty: |
| fy_deduped = fy_flow.sort_values("filed").drop_duplicates( |
| subset=["ticker", "tag", "period_end"], keep="last", |
| ).dropna(subset=["period_start", "period_end"]) |
| q_deduped = quarterly_flow.sort_values("filed").drop_duplicates( |
| subset=["ticker", "tag", "period_end"], keep="last", |
| ) |
| q_deduped = q_deduped[q_deduped["fiscal_period"].isin(["Q1", "Q2", "Q3"])] |
|
|
| if not fy_deduped.empty and not q_deduped.empty: |
| fy_key = fy_deduped[["ticker", "tag", "period_start", "period_end", "value"]].copy() |
| fy_key = fy_key.rename(columns={ |
| "period_start": "fy_start", "period_end": "fy_end", "value": "fy_value", |
| }) |
| q_key = q_deduped[["ticker", "tag", "period_end", "value"]].copy() |
| q_key = q_key.rename(columns={"period_end": "q_end", "value": "q_value"}) |
|
|
| merged = fy_key.merge(q_key, on=["ticker", "tag"], how="inner") |
| merged = merged[(merged["q_end"] > merged["fy_start"]) & (merged["q_end"] <= merged["fy_end"])] |
|
|
| agg = merged.groupby(["ticker", "tag", "fy_end"]).agg( |
| q_count=("q_value", "size"), |
| q_sum=("q_value", "sum"), |
| fy_value=("fy_value", "first"), |
| fy_start=("fy_start", "first"), |
| ).reset_index() |
| agg = agg[agg["q_count"] == 3] |
| agg["q4_value"] = agg["fy_value"] - agg["q_sum"] |
| agg = agg[agg["q4_value"] > 0] |
|
|
| if not agg.empty: |
| q4_rows = fy_deduped.merge( |
| agg[["ticker", "tag", "fy_end", "q4_value"]], |
| left_on=["ticker", "tag", "period_end"], |
| right_on=["ticker", "tag", "fy_end"], |
| how="inner", |
| ) |
| q4_rows["value"] = q4_rows["q4_value"] |
| q4_rows["fiscal_period"] = "Q4_derived" |
| q4_rows = q4_rows.drop(columns=["fy_end", "q4_value"], errors="ignore") |
| quarterly_flow = pd.concat([quarterly_flow, q4_rows], ignore_index=True) |
| logger.info("Derived %d Q4 standalone values from FY - (Q1+Q2+Q3).", len(q4_rows)) |
|
|
| |
| balance_facts = balance_facts[balance_facts["fiscal_period"].isin( |
| ["Q1", "Q2", "Q3", "Q4", "FY"] |
| )] |
| facts = pd.concat([balance_facts, quarterly_flow], ignore_index=True) |
|
|
| |
| facts = facts.sort_values("filed").drop_duplicates( |
| subset=["ticker", "tag", "period_end"], keep="last", |
| ) |
|
|
| |
| col_frames: dict[str, pd.DataFrame] = {} |
|
|
| for stmt_col, tag_list in config.XBRL_TAG_MAP.items(): |
| # Tags are tried in priority order: later tags only fill (ticker, period_end) |
| # keys not already covered by an earlier tag. |
| parts: list[pd.DataFrame] = [] |
| covered_keys: set[tuple[str, pd.Timestamp]] = set() |
|
|
| for tag in tag_list: |
| subset = facts[facts["tag"] == tag][ |
| ["ticker", "period_end", "value"] |
| ].copy() |
| if subset.empty: |
| continue |
| if covered_keys: |
| keep = [ |
| (t, d) not in covered_keys |
| for t, d in zip(subset["ticker"], subset["period_end"]) |
| ] |
| subset = subset[keep] |
| if subset.empty: |
| continue |
| covered_keys.update( |
| zip(subset["ticker"], subset["period_end"]) |
| ) |
| parts.append(subset) |
|
|
| if parts: |
| combined = pd.concat(parts, ignore_index=True) |
| combined = combined.rename(columns={"value": stmt_col}) |
| col_frames[stmt_col] = combined |
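|
| # Illustrative fallback (the tag names in config are assumptions): with |
| # tag_list = ["Revenues", "RevenueFromContractWithCustomerExcludingAssessedTax"], |
| # the second tag only fills (ticker, period_end) keys the first one missed; |
| # it never overwrites a value already covered by a higher-priority tag. |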
|
|
| if not col_frames: |
| logger.warning("No XBRL facts resolved to stmt_ columns.") |
| return pd.DataFrame() |
|
|
| |
| items = iter(col_frames.values()) |
| wide = next(items) |
| for extra in items: |
| wide = wide.merge(extra, on=["ticker", "period_end"], how="outer") |
|
|
| # Fill gaps via accounting identities: EBITDA = EBIT + D&A, free cash flow = |
| # operating cash flow - abs(capex), effective tax rate = abs(tax provision) / |
| # pretax income, gross profit = revenue - abs(COGS). |
| if "stmt_ebit" in wide.columns: |
| da_facts = facts[facts["tag"].isin(config.XBRL_DA_TAGS)].copy() |
| if not da_facts.empty: |
| da_facts = da_facts.sort_values("filed").drop_duplicates( |
| subset=["ticker", "period_end"], keep="last", |
| ) |
| da_map = da_facts.set_index(["ticker", "period_end"])["value"] |
| wide_idx = wide.set_index(["ticker", "period_end"]) |
| da_aligned = da_map.reindex(wide_idx.index) |
| ebitda_derived = wide_idx["stmt_ebit"] + da_aligned |
| if "stmt_ebitda" not in wide.columns: |
| wide["stmt_ebitda"] = ebitda_derived.values |
| else: |
| mask = wide["stmt_ebitda"].isna() |
| wide.loc[mask, "stmt_ebitda"] = ebitda_derived.values[mask.values] |
|
|
| |
| if "stmt_operating_cashflow" in wide.columns and "stmt_capex" in wide.columns: |
| if "stmt_free_cashflow" not in wide.columns: |
| wide["stmt_free_cashflow"] = ( |
| wide["stmt_operating_cashflow"] - wide["stmt_capex"].abs() |
| ) |
| else: |
| mask = wide["stmt_free_cashflow"].isna() |
| wide.loc[mask, "stmt_free_cashflow"] = ( |
| wide.loc[mask, "stmt_operating_cashflow"] |
| - wide.loc[mask, "stmt_capex"].abs() |
| ) |
|
|
| |
| if "stmt_tax_provision" in wide.columns and "stmt_pretax_income" in wide.columns: |
| if "stmt_tax_rate" not in wide.columns: |
| pretax = wide["stmt_pretax_income"].replace(0, np.nan) |
| wide["stmt_tax_rate"] = (wide["stmt_tax_provision"].abs() / pretax).clip(0, 0.5) |
|
|
| |
| if "stmt_revenue" in wide.columns and "stmt_cogs" in wide.columns: |
| if "stmt_gross_profit" not in wide.columns: |
| wide["stmt_gross_profit"] = wide["stmt_revenue"] - wide["stmt_cogs"].abs() |
| else: |
| mask = wide["stmt_gross_profit"].isna() |
| wide.loc[mask, "stmt_gross_profit"] = ( |
| wide.loc[mask, "stmt_revenue"] - wide.loc[mask, "stmt_cogs"].abs() |
| ) |
|
|
| # Enforce the accounting identity A = L + E within a 1% relative tolerance: |
| # first fill whichever single term is missing from the other two, then use |
| # the reported LiabilitiesAndStockholdersEquity (LAE) total to decide whether |
| # the assets or the equity side is corrupted, and finally null rows that |
| # still fail. |
| if all(c in wide.columns for c in ["stmt_total_assets", "stmt_total_liabilities", "stmt_total_equity"]): |
| |
| lae_facts = facts[facts["tag"] == "LiabilitiesAndStockholdersEquity"][ |
| ["ticker", "period_end", "value", "filed"] |
| ].copy() |
| lae_map = pd.Series(dtype=float) |
| if not lae_facts.empty: |
| lae_facts = lae_facts.sort_values("filed").drop_duplicates( |
| subset=["ticker", "period_end"], keep="last", |
| ) |
| lae_map = lae_facts.set_index(["ticker", "period_end"])["value"] |
|
|
| |
| widx = wide.set_index(["ticker", "period_end"]) |
| A = widx["stmt_total_assets"] |
| L = widx["stmt_total_liabilities"] |
| E = widx["stmt_total_equity"] |
| lae = lae_map.reindex(widx.index) if not lae_map.empty else pd.Series(np.nan, index=widx.index) |
|
|
| def _rel_diff(x, y): |
| return (x - y).abs() / x.abs().replace(0, np.nan) |
|
|
| TOL = 0.01 |
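|
| # Worked sketch (illustrative numbers): A=100, L=60, E=25 fails the 1% check. |
| # If LAE=100 agrees with A, equity is rewritten to A - L = 40; if instead |
| # LAE=85 agrees with L + E, assets are rewritten to 85. Rows matching neither |
| # are nulled in the final pass below. |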
| pre_bad = _rel_diff(A, L.fillna(0) + E.fillna(0)) > TOL |
| pre_bad_count = pre_bad.sum() |
| fix_counts = {} |
|
|
| # Fill the single missing term from the other two. |
| pass2_mask = A.notna() & L.notna() & E.isna() |
| if pass2_mask.any(): |
| widx.loc[pass2_mask, "stmt_total_equity"] = (A - L)[pass2_mask] |
| fix_counts["derive_E_from_A_minus_L"] = pass2_mask.sum() |
|
|
| |
| pass3_mask = A.notna() & L.isna() & E.notna() |
| if pass3_mask.any(): |
| widx.loc[pass3_mask, "stmt_total_liabilities"] = (A - E)[pass3_mask] |
| fix_counts["derive_L_from_A_minus_E"] = pass3_mask.sum() |
|
|
| |
| pass4_mask = A.isna() & L.notna() & E.notna() |
| if pass4_mask.any(): |
| widx.loc[pass4_mask, "stmt_total_assets"] = (L + E)[pass4_mask] |
| fix_counts["derive_A_from_L_plus_E"] = pass4_mask.sum() |
|
|
| |
| A = widx["stmt_total_assets"] |
| L = widx["stmt_total_liabilities"] |
| E = widx["stmt_total_equity"] |
|
|
| |
| if not lae_map.empty: |
| lae_aligned = lae_map.reindex(widx.index) |
| bad5 = (_rel_diff(A, L.fillna(0) + E.fillna(0)) > TOL) & \ |
| (_rel_diff(A, lae_aligned) <= TOL) & lae_aligned.notna() & \ |
| A.notna() & L.notna() |
| if bad5.any(): |
| widx.loc[bad5, "stmt_total_equity"] = (A - L)[bad5] |
| fix_counts["equity_fix_via_LAE"] = bad5.sum() |
| |
| refresh_E = widx["stmt_total_equity"] |
| bad6 = (_rel_diff(A, lae_aligned) > TOL) & \ |
| (_rel_diff(lae_aligned, L.fillna(0) + refresh_E.fillna(0)) <= TOL) & \ |
| lae_aligned.notna() & L.notna() & refresh_E.notna() |
| if bad6.any(): |
| widx.loc[bad6, "stmt_total_assets"] = lae_aligned[bad6] |
| fix_counts["assets_fix_via_LAE"] = bad6.sum() |
|
|
| |
| |
| A = widx["stmt_total_assets"] |
| L = widx["stmt_total_liabilities"] |
| E = widx["stmt_total_equity"] |
| still_bad = _rel_diff(A, L.fillna(0) + E.fillna(0)) > TOL |
| if still_bad.any(): |
| n_drop = still_bad.sum() |
| widx.loc[still_bad, ["stmt_total_assets", "stmt_total_liabilities", "stmt_total_equity"]] = np.nan |
| fix_counts["dropped_unreliable"] = n_drop |
|
|
| wide = widx.reset_index() |
| total_fixes = sum(fix_counts.values()) |
| logger.info( |
| "Balance equation: %d pre-fix mismatches. Applied: %s. Total fixed/dropped: %d / %d.", |
| int(pre_bad_count), fix_counts, total_fixes, len(wide), |
| ) |
|
|
| wide = wide.rename(columns={"period_end": "date"}) |
| wide = wide.sort_values(["ticker", "date"]).reset_index(drop=True) |
|
|
| # TTM sums for flow metrics (same set as in _load_statement_long), computed |
| # per ticker over consecutive non-null quarterly values; note a reporting gap |
| # can stretch a nominal TTM window across more than four calendar quarters. |
| _FLOW_METRICS = { |
| "stmt_revenue", "stmt_net_income", "stmt_ebitda", "stmt_ebit", |
| "stmt_gross_profit", "stmt_operating_income", "stmt_basic_eps", |
| "stmt_operating_cashflow", "stmt_free_cashflow", "stmt_capex", |
| "stmt_cogs", "stmt_operating_expenses", "stmt_financing_cashflow", |
| } |
| for col in list(wide.columns): |
| if col in _FLOW_METRICS: |
| ttm_col = f"{col}_ttm" |
| wide[ttm_col] = np.nan |
| for ticker, grp in wide.groupby("ticker"): |
| valid = grp[col].dropna() |
| if len(valid) >= 4: |
| ttm_vals = valid.rolling(window=4, min_periods=4).sum() |
| wide.loc[ttm_vals.index, ttm_col] = ttm_vals |
|
|
| logger.info( |
| "Loaded XBRL statements: %d rows, %d tickers, %d stmt columns, " |
| "date range %s to %s.", |
| len(wide), wide["ticker"].nunique(), |
| sum(1 for c in wide.columns if c.startswith("stmt_")), |
| wide["date"].min().date(), wide["date"].max().date(), |
| ) |
| return wide |
|
|
|
|
| def _load_macro_raw() -> pd.DataFrame: |
| """Load all FRED + EIA CSVs into one date-indexed DataFrame (native granularity).""" |
| macro = pd.DataFrame() |
|
|
| # FRED series: one CSV per series id, outer-merged on date. |
| for series_id in config.FRED_SERIES: |
| csv_path = config.MACRO_DIR / f"fred_{series_id}.csv" |
| if not csv_path.exists(): |
| continue |
| try: |
| df = pd.read_csv(csv_path) |
| if "date" not in df.columns: |
| continue |
| df["date"] = pd.to_datetime(df["date"]) |
| non_date = [c for c in df.columns if c != "date"] |
| if not non_date: |
| logger.warning("FRED %s CSV has no value column, skipping.", series_id) |
| continue |
| col = series_id if series_id in df.columns else non_date[0] |
| df = df[["date", col]].rename(columns={col: f"fred_{series_id}"}) |
| df[f"fred_{series_id}"] = pd.to_numeric(df[f"fred_{series_id}"], errors="coerce") |
| if macro.empty: |
| macro = df |
| else: |
| macro = macro.merge(df, on="date", how="outer") |
| except Exception as exc: |
| logger.warning("Could not load FRED %s: %s", series_id, exc) |
|
|
| # EIA commodity series: one directory per commodity, one CSV per series; the |
| # first numeric column is taken as the value. |
| for commodity_type in ["crude_oil", "natural_gas"]: |
| commodity_dir = config.MACRO_DIR / commodity_type |
| if not commodity_dir.is_dir(): |
| continue |
| for csv_file in sorted(commodity_dir.glob("*.csv")): |
| if "_raw" in csv_file.stem: |
| continue |
| try: |
| df = pd.read_csv(csv_file) |
| date_col = next( |
| (c for c in df.columns if "date" in c.lower() or "period" in c.lower() or "time" in c.lower()), |
| None, |
| ) |
| if date_col is None: |
| continue |
| df[date_col] = pd.to_datetime(df[date_col], errors="coerce") |
| df = df.dropna(subset=[date_col]) |
| num_cols = df.select_dtypes(include="number").columns.tolist() |
| if not num_cols: |
| continue |
| col_name = f"eia_{commodity_type}_{csv_file.stem}" |
| df = df[[date_col, num_cols[0]]].rename(columns={date_col: "date", num_cols[0]: col_name}) |
| if macro.empty: |
| macro = df |
| else: |
| macro = macro.merge(df, on="date", how="outer") |
| except Exception as exc: |
| logger.warning("Could not load EIA %s: %s", csv_file.name, exc) |
|
|
| if not macro.empty: |
| macro = macro.sort_values("date").reset_index(drop=True) |
| return macro |
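|
| # Illustrative output shape: one row per native observation date, with columns |
| # such as date, fred_DGS10, fred_SP500 and eia_crude_oil_* (exact names depend |
| # on config and the raw file stems), and NaN where a series has no print that |
| # day. |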
|
|
|
|
| def _load_universe() -> pd.DataFrame: |
| """Load benchmark_universe.csv.""" |
| path = config.UNIVERSE_DIR / "benchmark_universe.csv" |
| if not path.exists(): |
| raise FileNotFoundError(f"Run Step 1 first: {path}") |
| return pd.read_csv(path) |
|
|
|
|
| def _load_company_info() -> pd.DataFrame: |
| """Load company_info.csv (static metadata only).""" |
| path = config.FUNDAMENTALS_DIR / "company_info.csv" |
| if not path.exists(): |
| return pd.DataFrame() |
| return pd.read_csv(path) |
|
|
|
|
| def _load_filing_metadata(tickers: list[str]) -> dict[str, list[tuple[pd.Timestamp, str, str]]]: |
| """Scan filings directory for .md files, extract (date, type, path). |
| |
| Returns {ticker: [(filing_date, filing_type, rel_path), ...]}, sorted by date. |
| """ |
| lookup: dict[str, list[tuple[pd.Timestamp, str, str]]] = {} |
| for ticker in tickers: |
| ticker_dir = config.FILINGS_DIR / ticker |
| entries: list[tuple[pd.Timestamp, str, str]] = [] |
| if ticker_dir.is_dir(): |
| for md_file in ticker_dir.glob("*.md"): |
| # Infer the filing type from the file name, checking amended/compound forms |
| # first so e.g. "10-K/A" is not swallowed by the "10-K" test. (If the |
| # downloader sanitises "/" out of file names, these patterns must match the |
| # sanitised spelling instead.) |
| name = md_file.name |
| if "10-K/A" in name: ftype = "10-K/A" |
| elif "10-Q/A" in name: ftype = "10-Q/A" |
| elif "10-K" in name: ftype = "10-K" |
| elif "10-Q" in name: ftype = "10-Q" |
| elif "8-K" in name: ftype = "8-K" |
| elif "20-F" in name: ftype = "20-F" |
| elif "40-F" in name: ftype = "40-F" |
| elif "N-CSRS" in name: ftype = "N-CSRS" |
| elif "N-CSR" in name: ftype = "N-CSR" |
| elif "6-K" in name: ftype = "6-K" |
| elif "DEF 14A" in name or "DEF14A" in name: ftype = "DEF 14A" |
| elif "S-1" in name: ftype = "S-1" |
| elif "11-K" in name: ftype = "11-K" |
| else: ftype = "other" |
| match = re.search(r"(\d{4}-\d{2}-\d{2})", md_file.name) |
| if match: |
| try: |
| fdate = pd.Timestamp(match.group(1)) |
| rel_path = str(md_file.relative_to(config.DATA_DIR)) |
| entries.append((fdate, ftype, rel_path)) |
| except Exception: |
| continue |
| entries.sort(key=lambda x: x[0]) |
| lookup[ticker] = entries |
| return lookup |
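|
| # Illustrative entry (file-name layout assumed, "ACME" is a hypothetical |
| # ticker): "2021-03-01_10-K.md" under data/filings/ACME/ yields |
| # (Timestamp("2021-03-01"), "10-K", "filings/ACME/2021-03-01_10-K.md") |
| # in lookup["ACME"]. |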
|
|
|
|
| def _load_real_estate_summary() -> dict[str, float | int]: |
| """Load raw RE CSVs and compute summary statistics. |
| |
| NOTE: These are static aggregate cross-sectional statistics (counts, |
| means, medians) broadcast identically to every panel row. They carry no |
| temporal information, so any train/test leakage they introduce is |
| negligible. |
| """ |
| summary: dict[str, float | int] = {} |
| re_dir = config.REAL_ESTATE_DIR |
|
|
| for name in ["properties", "rentals", "sales"]: |
| csv_path = re_dir / f"{name}.csv" |
| if csv_path.exists(): |
| try: |
| df = pd.read_csv(csv_path) |
| summary[f"re_{name}_count"] = len(df) |
| for col in ["price", "rent", "squareFootage", "square_footage", |
| "listPrice", "salePrice", "last_sale_price"]: |
| if col in df.columns: |
| vals = pd.to_numeric(df[col], errors="coerce").dropna() |
| if not vals.empty: |
| summary[f"re_{name}_{col}_mean"] = float(vals.mean()) |
| summary[f"re_{name}_{col}_median"] = float(vals.median()) |
| except Exception as exc: |
| logger.warning("Could not load RE %s: %s", name, exc) |
|
|
| demo_path = re_dir / "demographics.csv" |
| if demo_path.exists(): |
| try: |
| df = pd.read_csv(demo_path) |
| summary["re_demographics_metros"] = len(df) |
| except Exception: |
| pass |
| return summary |
|
|
|
|
| # --------------------------------------------------------------------------- |
| # 2b. Resample to target granularity |
| # --------------------------------------------------------------------------- |
|
|
| def _resample_prices(prices: pd.DataFrame, granularity: str) -> pd.DataFrame: |
| """Resample OHLCV+adj_close to target granularity.""" |
| if granularity == "daily": |
| return prices |
|
|
| freq = "W-FRI" if granularity == "weekly" else "MS" |
| agg: dict[str, str] = { |
| "open": "first", |
| "high": "max", |
| "low": "min", |
| "close": "last", |
| "volume": "sum", |
| } |
| if "adj_close" in prices.columns: |
| agg["adj_close"] = "last" |
|
|
| resampled = ( |
| prices |
| .set_index("date") |
| .groupby("ticker") |
| .resample(freq) |
| .agg(agg) |
| .dropna(subset=["close"]) |
| .reset_index() |
| ) |
| return resampled.sort_values(["ticker", "date"]).reset_index(drop=True) |
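|
| # Worked sketch (illustrative): five daily bars Mon-Fri collapse into one |
| # W-FRI bar stamped on the Friday: open = Monday's open, high = weekly max, |
| # low = weekly min, close = Friday's close, volume = five-day sum, and |
| # adj_close (when present) = Friday's value. |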
|
|
|
|
| def _resample_macro(macro: pd.DataFrame, granularity: str) -> pd.DataFrame: |
| """Resample macro data to target granularity. |
| |
| Aggregation rules (matching the plan): |
| - Rates / indices (FRED series): last value in each period |
| - Volume / production EIA series: sum |
| - All other numeric: last |
| """ |
| if macro.empty or granularity == "daily": |
| return macro |
|
|
| freq = "W-FRI" if granularity == "weekly" else "MS" |
|
|
| # Keywords marking volume-like series (aggregated by sum); levels and rates |
| # take the last value in each period. |
| _sum_keywords = {"export", "import", "production", "reserves"} |
| agg_map: dict[str, str] = {} |
| for col in macro.columns: |
| if col == "date": |
| continue |
| col_lower = col.lower() |
| if any(kw in col_lower for kw in _sum_keywords): |
| agg_map[col] = "sum" |
| else: |
| agg_map[col] = "last" |
|
|
| resampled = ( |
| macro |
| .set_index("date") |
| .resample(freq) |
| .agg(agg_map) |
| .reset_index() |
| ) |
| return resampled.sort_values("date").reset_index(drop=True) |
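|
| # Worked sketch (illustrative): resampled monthly, a daily fred_DGS10 column |
| # keeps the last print of each month, while a column named like |
| # eia_crude_oil_production is summed over the month. |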
|
|
|
|
| # --------------------------------------------------------------------------- |
| # 2c. Merge into panel |
| # --------------------------------------------------------------------------- |
|
|
| def _attach_nearest_filing( |
| panel: pd.DataFrame, |
| filing_lookup: dict[str, list[tuple[pd.Timestamp, str, str]]], |
| ) -> pd.DataFrame: |
| """For each (ticker, date), find the most recent filing as-of that date. |
| |
| Uses ``pd.merge_asof`` for vectorised performance instead of iterrows. |
| """ |
| |
| filing_rows: list[dict] = [] |
| for ticker, entries in filing_lookup.items(): |
| for fdate, ftype, fpath in entries: |
| filing_rows.append({ |
| "ticker": ticker, |
| "filing_date": fdate, |
| "filing_type": ftype, |
| "filing_path": fpath, |
| }) |
|
|
| if not filing_rows: |
| panel["nearest_filing_type"] = None |
| panel["nearest_filing_date"] = pd.NaT |
| panel["nearest_filing_path"] = None |
| panel["days_since_filing"] = np.nan |
| return panel |
|
|
| filings_df = pd.DataFrame(filing_rows) |
| filings_df["filing_date"] = pd.to_datetime(filings_df["filing_date"]) |
| filings_df = filings_df.sort_values("filing_date").reset_index(drop=True) |
|
|
| # merge_asof requires both frames sorted by the join key ("date"). |
| panel = panel.sort_values("date").reset_index(drop=True) |
| asof_result = pd.merge_asof( |
| panel[["ticker", "date"]], |
| filings_df, |
| left_on="date", |
| right_on="filing_date", |
| by="ticker", |
| direction="backward", |
| ) |
| panel["nearest_filing_type"] = asof_result["filing_type"].values |
| panel["nearest_filing_date"] = pd.to_datetime(asof_result["filing_date"].values) |
| panel["nearest_filing_path"] = asof_result["filing_path"].values |
| panel["days_since_filing"] = (panel["date"] - panel["nearest_filing_date"]).dt.days |
| return panel |
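|
| # Worked sketch (illustrative dates): with filings on 2021-03-01 (10-K) and |
| # 2021-05-10 (10-Q), a panel row dated 2021-05-07 picks up the 10-K with |
| # days_since_filing = 67, while a row dated 2021-05-10 picks up the 10-Q |
| # with days_since_filing = 0 (backward as-of includes exact matches). |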
|
|
|
|
| # --------------------------------------------------------------------------- |
| # 2d. Derive time-varying metrics |
| # --------------------------------------------------------------------------- |
|
|
| def _derive_shares_outstanding(panel: pd.DataFrame, company_info: pd.DataFrame) -> pd.Series: |
| """Compute shares_outstanding via the fallback chain. |
| |
| Priority: |
| 1. stmt_shares_outstanding (balance sheet ``Ordinary Shares Number``) |
| 2. stmt_shares_issued (balance sheet ``Share Issued``) |
| 3. stmt_net_income / stmt_basic_eps (income statement derived) |
| 4. Price-derived via Adj Close split-adjustment ratio |
| """ |
| shares = panel.get("stmt_shares_outstanding") |
| if shares is not None: |
| shares = shares.copy() |
| # Zero share counts are placeholders, not data; treat them as missing. |
| shares = shares.replace(0, np.nan) |
| else: |
| shares = pd.Series(np.nan, index=panel.index) |
|
|
| # Fallback 2: balance-sheet shares issued. |
| if "stmt_shares_issued" in panel.columns: |
| mask = shares.isna() |
| issued = panel.loc[mask, "stmt_shares_issued"].replace(0, np.nan) |
| shares.loc[mask] = issued |
|
|
| # Fallback 3: shares implied by net income / basic EPS. |
| if "stmt_net_income" in panel.columns and "stmt_basic_eps" in panel.columns: |
| mask = shares.isna() |
| eps = panel.loc[mask, "stmt_basic_eps"].replace(0, np.nan) |
| shares.loc[mask] = panel.loc[mask, "stmt_net_income"] / eps |
|
|
| # Fallback 4: anchor market cap from company_info, scaled back through time |
| # by the close/adj_close split ratio. |
| if "adj_close" in panel.columns and "close" in panel.columns: |
| mask = shares.isna() |
| if mask.any() and not company_info.empty and "marketCap" in company_info.columns: |
| |
| anchor_df = company_info[["ticker", "marketCap"]].dropna().drop_duplicates(subset="ticker") |
| anchor_map = dict(zip(anchor_df["ticker"], anchor_df["marketCap"])) |
|
|
| adj_ratio = panel["close"] / panel["adj_close"].replace(0, np.nan) |
|
|
| # Restrict the work to tickers that both need the fallback and have an anchor. |
| tickers_needing_fb4 = panel.loc[mask, "ticker"].unique() |
| tickers_with_anchor = [t for t in tickers_needing_fb4 if t in anchor_map] |
|
|
| if tickers_with_anchor: |
| |
| fb4_mask = mask & panel["ticker"].isin(tickers_with_anchor) |
| fb4_panel = panel.loc[fb4_mask | panel["ticker"].isin(tickers_with_anchor)].copy() |
| fb4_panel["_adj_ratio"] = adj_ratio.loc[fb4_panel.index] |
|
|
| |
| latest_idx = fb4_panel.groupby("ticker")["date"].idxmax() |
| anchor_rows = fb4_panel.loc[latest_idx, ["ticker", "close", "_adj_ratio"]].set_index("ticker") |
|
|
| |
| anchor_info = pd.DataFrame({ |
| "ticker": tickers_with_anchor, |
| "mcap": [anchor_map[t] for t in tickers_with_anchor], |
| }) |
| anchor_info = anchor_info.merge(anchor_rows, on="ticker", how="inner") |
| anchor_info["anchor_shares"] = anchor_info["mcap"] / anchor_info["close"].replace(0, np.nan) |
| anchor_info["anchor_adj_ratio"] = anchor_info["_adj_ratio"] |
| anchor_info = anchor_info.dropna(subset=["anchor_shares", "anchor_adj_ratio"]) |
| anchor_info = anchor_info[anchor_info["anchor_adj_ratio"] != 0] |
|
|
| if not anchor_info.empty: |
| |
| ticker_to_anchor_shares = dict(zip(anchor_info["ticker"], anchor_info["anchor_shares"])) |
| ticker_to_anchor_adj = dict(zip(anchor_info["ticker"], anchor_info["anchor_adj_ratio"])) |
|
|
| applicable = mask & panel["ticker"].isin(anchor_info["ticker"]) |
| if applicable.any(): |
| tk_series = panel.loc[applicable, "ticker"] |
| a_shares = tk_series.map(ticker_to_anchor_shares) |
| a_adj = tk_series.map(ticker_to_anchor_adj) |
| historical = a_shares / (adj_ratio.loc[applicable] / a_adj) |
| shares.loc[applicable] = historical |
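|
| # Math sketch: with adj_ratio = close / adj_close, the cumulative split factor |
| # between date t and the anchor is adj_ratio_t / adj_ratio_anchor, so |
| # shares_t = anchor_shares / (adj_ratio_t / adj_ratio_anchor). Illustration: a |
| # 2:1 split after t makes adj_ratio_t twice the anchor's, halving the inferred |
| # historical share count -- as expected pre-split. |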
|
|
| # Guard against XBRL unit errors (e.g. shares reported in thousands): an |
| # implied market cap above $5T flags the error, repaired by dividing by 1000 |
| # until plausible. |
| if "close" in panel.columns: |
| _close = panel.groupby("ticker")["close"].transform("last") |
| _mcap = shares * _close |
| insane = _mcap > 5e12 |
| if insane.any(): |
| tickers_insane = panel.loc[insane, "ticker"].unique() |
| for t in tickers_insane: |
| tmask = panel["ticker"] == t |
| while (shares.loc[tmask] * _close.loc[tmask]).max() > 5e12: |
| shares.loc[tmask] = shares.loc[tmask] / 1000 |
| logger.warning( |
| "Ticker %s: shares_outstanding corrected (XBRL unit error)", t |
| ) |
|
|
| |
| neg_mask = shares < 0 |
| if neg_mask.any(): |
| bad_tickers = panel.loc[neg_mask, "ticker"].unique() |
| logger.warning( |
| "Negative shares_outstanding for %d rows (%s) — setting to NaN.", |
| neg_mask.sum(), list(bad_tickers), |
| ) |
| shares.loc[neg_mask] = np.nan |
|
|
| # More than 10B shares is implausible for this small-cap universe. |
| huge_mask = shares > 10e9 |
| if huge_mask.any(): |
| bad_tickers = panel.loc[huge_mask, "ticker"].unique() |
| logger.warning( |
| "shares_outstanding > 10B for %d rows (%s) — setting to NaN.", |
| huge_mask.sum(), list(bad_tickers), |
| ) |
| shares.loc[huge_mask] = np.nan |
|
|
| return shares |
|
|
|
|
| def _compute_derived_metrics(panel: pd.DataFrame, granularity: str = "daily") -> pd.DataFrame: |
| """Add time-varying derived value-estimation columns. |
| |
| Uses TTM (trailing-twelve-month) values for flow metrics (revenue, |
| net income, EBITDA, FCF) so that ratios like P/E reflect annualised |
| earnings, not a single quarter. Falls back to single-quarter values |
| if TTM columns are unavailable. |
| """ |
| out = panel.copy() |
|
|
| so = out.get("shares_outstanding") |
| if so is None: |
| return out |
|
|
| close = out["close"] |
|
|
| # Share counts are forward-filled from filings, so around a split the price |
| # and the share count can briefly disagree. Where the close/adj_close ratio |
| # signals a split, rescale the share count before computing market caps. |
| if "adj_close" in out.columns: |
| adj_close_safe = out["adj_close"].replace(0, np.nan) |
| |
| adj_close_safe = adj_close_safe.where(adj_close_safe > 0) |
| split_ratio = close / adj_close_safe |
| |
| needs_adj = ((split_ratio > 1.5) | (split_ratio < 0.67)) & (split_ratio > 0) |
| if needs_adj.any(): |
| so = so.copy() |
| so.loc[needs_adj] = so.loc[needs_adj] / split_ratio.loc[needs_adj] |
| n_adj = needs_adj.sum() |
| n_tickers = out.loc[needs_adj, "ticker"].nunique() |
| logger.info( |
| "Split-adjusted shares_outstanding for %d rows (%d tickers) " |
| "using close/adj_close ratio.", |
| n_adj, n_tickers, |
| ) |
|
|
| out["derived_market_cap"] = close * so |
| |
| |
| neg_mc = out["derived_market_cap"] < 0 |
| if neg_mc.any(): |
| n_neg = neg_mc.sum() |
| n_t = out.loc[neg_mc, "ticker"].nunique() |
| logger.warning("Negative derived_market_cap for %d rows (%d tickers) — setting to NaN.", n_neg, n_t) |
| out.loc[neg_mc, "derived_market_cap"] = np.nan |
|
|
| # Ceiling check: a $100B market cap inside a small-cap universe is a data |
| # error, not a discovery; null it rather than let it poison the ratios. |
| _MCAP_CEILING = 100e9 |
| mcap_insane = out["derived_market_cap"] > _MCAP_CEILING |
| if mcap_insane.any(): |
| n_insane = mcap_insane.sum() |
| tickers_insane = out.loc[mcap_insane, "ticker"].nunique() |
| logger.warning( |
| "derived_market_cap > $%.0fB for %d rows (%d tickers) — setting to NaN.", |
| _MCAP_CEILING / 1e9, n_insane, tickers_insane, |
| ) |
| out.loc[mcap_insane, "derived_market_cap"] = np.nan |
|
|
| def _col(name: str) -> pd.Series | None: |
| """Return TTM column if available, else quarterly, else None.""" |
| ttm = f"{name}_ttm" |
| if ttm in out.columns: |
| return out[ttm] |
| if name in out.columns: |
| return out[name] |
| return None |
|
|
| ni = _col("stmt_net_income") |
| if ni is not None: |
| # Negative earnings make P/E meaningless; mask them rather than emit negative |
| # multiples. |
| ni_safe = ni.where(ni > 0) |
| out["derived_pe"] = out["derived_market_cap"] / ni_safe |
|
|
| if "stmt_total_debt" in out.columns and "stmt_cash" in out.columns: |
| out["derived_ev"] = out["derived_market_cap"] + out["stmt_total_debt"].fillna(0) - out["stmt_cash"].fillna(0) |
|
|
| rev = _col("stmt_revenue") |
| if "derived_ev" in out.columns and rev is not None: |
| out["derived_ev_to_revenue"] = out["derived_ev"] / rev.replace(0, np.nan) |
|
|
| ebitda = _col("stmt_ebitda") |
| if "derived_ev" in out.columns and ebitda is not None: |
| out["derived_ev_to_ebitda"] = out["derived_ev"] / ebitda.replace(0, np.nan) |
|
|
| fcf = _col("stmt_free_cashflow") |
| if fcf is not None: |
| out["derived_fcf_yield"] = fcf / out["derived_market_cap"].replace(0, np.nan) |
|
|
| if "stmt_total_equity" in out.columns: |
| out["derived_pb"] = out["derived_market_cap"] / out["stmt_total_equity"].replace(0, np.nan) |
|
|
| if "stmt_total_debt" in out.columns and "stmt_total_equity" in out.columns: |
| out["derived_debt_to_equity"] = out["stmt_total_debt"] / out["stmt_total_equity"].replace(0, np.nan) |
|
|
| # WACC inputs: effective tax rate and cost of debt, each clipped to a sane |
| # range. |
| tax = _col("stmt_tax_provision") |
| pretax = _col("stmt_pretax_income") |
| if tax is not None and pretax is not None: |
| out["derived_effective_tax_rate"] = ( |
| tax.abs() / pretax.replace(0, np.nan) |
| ).clip(0.0, 0.50) |
|
|
| |
| int_exp = _col("stmt_interest_expense") |
| if int_exp is not None and "stmt_total_debt" in out.columns: |
| out["derived_cost_of_debt"] = ( |
| int_exp.abs() / out["stmt_total_debt"].replace(0, np.nan) |
| ).clip(0.0, 0.20) |
|
|
| # Rolling CAPM beta = cov(stock, market) / var(market). Note the returns use |
| # the raw close, so an unadjusted split can inject outlier returns. |
| if "fred_SP500" in out.columns and "close" in out.columns: |
| _gran = granularity |
| if _gran == "monthly": |
| _beta_window, _beta_min = 36, 12 |
| elif _gran == "weekly": |
| _beta_window, _beta_min = 52, 13 |
| else: |
| _beta_window, _beta_min = config.BETA_LOOKBACK_DAYS, 60 |
| out["derived_beta"] = np.nan |
| for tk, grp in out.groupby("ticker", sort=False): |
| if len(grp) < _beta_min: |
| continue |
| stk_ret = grp["close"].pct_change() |
| mkt_ret = grp["fred_SP500"].pct_change() |
| |
| cov_sm = stk_ret.rolling(_beta_window, min_periods=_beta_min).cov(mkt_ret) |
| var_m = mkt_ret.rolling(_beta_window, min_periods=_beta_min).var() |
| beta = (cov_sm / var_m.replace(0, np.nan)).clip(0.1, 4.0) |
| out.loc[grp.index, "derived_beta"] = beta |
|
|
| # Cost of equity via CAPM, then WACC with market-value weights. |
| if "derived_beta" in out.columns and "fred_DGS10" in out.columns: |
| rf = out["fred_DGS10"].ffill() / 100.0 |
| ke = rf + out["derived_beta"].fillna(1.0) * config.MARKET_RISK_PREMIUM |
|
|
| kd = out.get("derived_cost_of_debt") |
| if kd is None: |
| kd = rf + 0.02 |
|
|
| t = out.get("derived_effective_tax_rate") |
| if t is None: |
| t = 0.21 |
|
|
| if "stmt_total_debt" in out.columns and "derived_market_cap" in out.columns: |
| d = out["stmt_total_debt"].fillna(0) |
| e = out["derived_market_cap"].fillna(0) |
| total = (d + e).replace(0, np.nan) |
| d_w = d / total |
| e_w = e / total |
| out["derived_wacc"] = (e_w * ke + d_w * kd * (1 - t)).clip(0.03, 0.25) |
|
|
| # Margin ratios, clipped to exclude pathological values. |
| gp = _col("stmt_gross_profit") |
| if gp is not None and rev is not None: |
| out["derived_gross_margin"] = (gp / rev.replace(0, np.nan)).clip(-1, 1) |
|
|
| |
| if ebitda is not None and rev is not None: |
| out["derived_ebitda_margin"] = (ebitda / rev.replace(0, np.nan)).clip(-2, 2) |
|
|
| |
| if ni is not None and rev is not None: |
| out["derived_net_margin"] = (ni / rev.replace(0, np.nan)).clip(-2, 2) |
|
|
| |
| cogs = _col("stmt_cogs") |
| if cogs is not None and rev is not None: |
| out["derived_cogs_pct"] = (cogs / rev.replace(0, np.nan)).clip(0, 2) |
|
|
| # YoY revenue growth via a row shift of roughly one year of periods; exact |
| # only for tickers with gap-free histories. |
| if rev is not None: |
| if granularity == "monthly": |
| lag_periods = 12 |
| elif granularity == "weekly": |
| lag_periods = 52 |
| else: |
| lag_periods = 252 |
| out["derived_rev_growth_yoy"] = np.nan |
| for tk, grp in out.groupby("ticker", sort=False): |
| rev_vals = rev.loc[grp.index] |
| rev_lag = rev_vals.shift(lag_periods) |
| growth = (rev_vals - rev_lag) / rev_lag.replace(0, np.nan) |
| out.loc[grp.index, "derived_rev_growth_yoy"] = growth.clip(-5, 50) |
|
|
| |
| if "stmt_current_assets" in out.columns and "stmt_current_liabilities" in out.columns: |
| cl = out["stmt_current_liabilities"].replace(0, np.nan) |
| out["derived_current_ratio"] = (out["stmt_current_assets"] / cl).clip(0, 50) |
|
|
| return out |
|
|
|
|
| # --------------------------------------------------------------------------- |
| # Column-role classification |
| # --------------------------------------------------------------------------- |
|
|
| def _build_column_roles(columns: list[str]) -> dict[str, list[str]]: |
| """Classify panel columns into roles based on naming convention.""" |
| roles: dict[str, list[str]] = { |
| "target": [], |
| "endogenous": [], |
| "exogenous_fundamental": [], |
| "exogenous_statement": [], |
| "exogenous_macro": [], |
| "exogenous_commodity": [], |
| "context_filing": [], |
| "context_real_estate": [], |
| "metadata": [], |
| } |
| for c in columns: |
| if c == "close": |
| roles["target"].append(c) |
| elif c in ("open", "high", "low", "volume", "adj_close"): |
| roles["endogenous"].append(c) |
| elif c.startswith("derived_") or c == "shares_outstanding": |
| roles["exogenous_fundamental"].append(c) |
| elif c.startswith("stmt_"): |
| roles["exogenous_statement"].append(c) |
| elif c.startswith("fred_"): |
| roles["exogenous_macro"].append(c) |
| elif c.startswith("eia_"): |
| roles["exogenous_commodity"].append(c) |
| elif c.startswith("nearest_filing") or c == "days_since_filing": |
| roles["context_filing"].append(c) |
| elif c.startswith("re_"): |
| roles["context_real_estate"].append(c) |
| else: |
| roles["metadata"].append(c) |
| return roles |
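|
| # Illustrative classification: "close" -> target; "volume" -> endogenous; |
| # "stmt_revenue_ttm" -> exogenous_statement; "fred_DGS10" -> exogenous_macro; |
| # "derived_pe" -> exogenous_fundamental; "days_since_filing" -> |
| # context_filing; "ticker" and "sector" -> metadata. |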
|
|
|
|
| # --------------------------------------------------------------------------- |
| # Entry point |
| # --------------------------------------------------------------------------- |
|
|
| def run(granularity: str | None = None) -> pd.DataFrame: |
| """Execute Layer 2 preprocessing and return the merged panel DataFrame. |
| |
| Parameters |
| ---------- |
| granularity : str, optional |
| ``"daily"``, ``"weekly"``, or ``"monthly"``. |
| Defaults to ``config.GRANULARITY``. |
| """ |
| if granularity is None: |
| granularity = config.GRANULARITY |
|
|
| out_dir = config.DATA_DIR / "processed" / granularity |
| out_dir.mkdir(parents=True, exist_ok=True) |
|
|
| # 2a. Load raw data. |
| logger.info("Loading raw data ...") |
| prices_raw = _load_prices() |
| universe = _load_universe() |
| company_info = _load_company_info() |
| macro_raw = _load_macro_raw() |
|
|
| # Drop excluded tickers before anything downstream sees them. |
| if config.EXCLUDED_TICKERS: |
| prices_raw = prices_raw[~prices_raw["ticker"].isin(config.EXCLUDED_TICKERS)] |
| tickers = prices_raw["ticker"].unique().tolist() |
| logger.info("Loaded prices: %d rows, %d tickers.", len(prices_raw), len(tickers)) |
|
|
| # 2b. Resample to the target granularity. |
| logger.info("Resampling to %s ...", granularity) |
| prices = _resample_prices(prices_raw, granularity) |
| macro = _resample_macro(macro_raw, granularity) |
| logger.info("Resampled prices: %d rows.", len(prices)) |
|
|
| # 2c. Merge: start from prices and layer everything else on. |
| panel = prices.copy() |
|
|
| # Static universe metadata (sector, industry, exchange, universe flags). |
| static_cols = ["ticker", "sector", "industry", "exchange", |
| "in_russell_2000", "lower_end_russell2000", "small_cap_outside"] |
| static_cols = [c for c in static_cols if c in universe.columns] |
| panel = panel.merge(universe[static_cols], on="ticker", how="left") |
|
|
| # company_info supplements the universe file: brand-new columns are merged in, |
| # while existing columns only get their NaNs patched. |
| if not company_info.empty: |
| info_static = ["ticker"] |
| for col in ["sector", "industry", "fullTimeEmployees"]: |
| if col in company_info.columns: |
| if col not in panel.columns: |
| info_static.append(col) |
| else: |
| # Patch NaNs only; never overwrite universe-sourced values. |
| ci_map = company_info.set_index("ticker")[col].dropna() |
| null_mask = panel[col].isna() |
| if null_mask.any(): |
| filled = panel.loc[null_mask, "ticker"].map(ci_map) |
| panel.loc[null_mask, col] = filled |
| n_filled = filled.notna().sum() |
| if n_filled > 0: |
| logger.info("Filled %d NaN %s values from company_info.", n_filled, col) |
| if len(info_static) > 1: |
| panel = panel.merge(company_info[info_static], on="ticker", how="left") |
| |
|
|
| # Normalize inconsistent exchange labels to a small canonical set. |
| _EXCHANGE_NORMALIZE: dict[str, str] = { |
| "Nyse Mkt Llc": "NYSE MKT", |
| "NYSE_MKT": "NYSE MKT", |
| "Non-Nms Quotation Service (Nnqs)": "OTC", |
| "NO MARKET (E.G. UNLISTED)": "OTC", |
| } |
| if "exchange" in panel.columns: |
| panel["exchange"] = panel["exchange"].replace(_EXCHANGE_NORMALIZE) |
|
|
| # Map yfinance-style sector names onto GICS-style names so both metadata |
| # sources agree. |
| _SECTOR_NORMALIZE: dict[str, str] = { |
| "Financial Services": "Financials", |
| "Healthcare": "Health Care", |
| "Consumer Cyclical": "Consumer Discretionary", |
| "Technology": "Information Technology", |
| "Basic Materials": "Materials", |
| "Communication Services": "Communication", |
| "Consumer Defensive": "Consumer Staples", |
| } |
| if "sector" in panel.columns: |
| before_unique = panel["sector"].nunique() |
| panel["sector"] = panel["sector"].replace(_SECTOR_NORMALIZE) |
| after_unique = panel["sector"].nunique() |
| if before_unique != after_unique: |
| logger.info("Normalized sector names: %d → %d unique values (GICS convention).", |
| before_unique, after_unique) |
|
|
| # Force a single modal sector per industry: mixed sources occasionally file |
| # the same industry under different sectors. |
| if "industry" in panel.columns and "sector" in panel.columns: |
| mode_map = panel.dropna(subset=["industry","sector"]).groupby("industry")["sector"].agg( |
| lambda x: x.mode().iloc[0] if len(x.mode()) > 0 else None |
| ) |
| has_ind = panel["industry"].notna() |
| if has_ind.any(): |
| panel.loc[has_ind, "sector"] = panel.loc[has_ind, "industry"].map(mode_map).fillna(panel.loc[has_ind, "sector"]) |
| logger.info("Applied industry→sector modal normalization.") |
|
|
| logger.info("Merged static metadata.") |
|
|
| # Financial statements: yfinance covers recent quarters, XBRL the deep history. |
| logger.info("Loading per-ticker financial statements (yfinance) ...") |
| yf_frames: list[pd.DataFrame] = [] |
| for ticker in tickers: |
| stmt = _load_statement_long(ticker) |
| if not stmt.empty: |
| yf_frames.append(stmt) |
|
|
| |
| logger.info("Loading XBRL historical statements ...") |
| xbrl_stmts = _load_xbrl_statements(tickers) |
|
|
| |
| |
| all_stmts: pd.DataFrame | None = None |
|
|
| if not xbrl_stmts.empty: |
| all_stmts = xbrl_stmts |
|
|
| if yf_frames: |
| yf_all = pd.concat(yf_frames, ignore_index=True) |
| if all_stmts is not None: |
| # Align columns across the two sources before concatenating. |
| all_stmt_cols = sorted( |
| {c for c in all_stmts.columns if c.startswith("stmt_")} |
| | {c for c in yf_all.columns if c.startswith("stmt_")} |
| ) |
| for c in all_stmt_cols: |
| if c not in all_stmts.columns: |
| all_stmts[c] = np.nan |
| if c not in yf_all.columns: |
| yf_all[c] = np.nan |
|
|
| # Stable sort so that, on (ticker, date) collisions, the later-concatenated |
| # yfinance rows still come last and win the dedup below (the default |
| # quicksort does not preserve order within equal dates). |
| combined = pd.concat([all_stmts, yf_all], ignore_index=True) |
| combined = combined.sort_values("date", kind="stable") |
| combined = combined.drop_duplicates( |
| subset=["ticker", "date"], keep="last", |
| ) |
| all_stmts = combined |
| else: |
| all_stmts = yf_all |
|
|
| if all_stmts is not None and not all_stmts.empty: |
| stmt_cols = [c for c in all_stmts.columns if c.startswith("stmt_")] |
|
| # Statements are sparse (quarterly); forward-fill per ticker so every date |
| # carries the latest reported value. |
| all_stmts = all_stmts.sort_values(["ticker", "date"]).reset_index(drop=True) |
| for col in stmt_cols: |
| all_stmts[col] = all_stmts.groupby("ticker")[col].ffill() |
| # Back-fill covers rows before a ticker's first statement. This is a mild |
| # look-ahead: pre-first-filing rows see values that were reported later. |
| all_stmts[col] = all_stmts.groupby("ticker")[col].bfill() |
|
|
| # Normalize and sort dates on both sides; merge_asof requires sorted keys. |
| all_stmts["date"] = pd.to_datetime(all_stmts["date"], errors="coerce") |
| panel["date"] = pd.to_datetime(panel["date"], errors="coerce") |
| all_stmts = all_stmts.dropna(subset=["date"]).sort_values("date").reset_index(drop=True) |
| panel = panel.dropna(subset=["date"]).sort_values("date").reset_index(drop=True) |
| # Re-check A = L + E after combining sources: mixing rows from two sources |
| # can break the identity even when each source alone satisfied it. |
| if all(c in all_stmts.columns for c in ["stmt_total_assets", "stmt_total_liabilities", "stmt_total_equity"]): |
| A = all_stmts["stmt_total_assets"] |
| L = all_stmts["stmt_total_liabilities"] |
| E = all_stmts["stmt_total_equity"] |
| all_present = A.notna() & L.notna() & E.notna() |
| rel_err = ((A - L.fillna(0) - E.fillna(0)).abs() / A.abs().replace(0, np.nan)) |
| bad = all_present & (rel_err > 0.01) |
| if bad.any(): |
| n = bad.sum() |
| all_stmts.loc[bad, ["stmt_total_assets", "stmt_total_liabilities", "stmt_total_equity"]] = np.nan |
| logger.info("Combined statements: dropped A/L/E for %d rows with balance mismatch > 1%% (post-combine).", n) |
|
|
| panel = pd.merge_asof( |
| panel, all_stmts[["ticker", "date"] + stmt_cols], |
| on="date", by="ticker", direction="backward", |
| ) |
| logger.info("Merged statement financials (%d metrics) via as-of join.", len(stmt_cols)) |
|
|
| # Post-merge sanity rules: null impossible values, then recover them from the |
| # ticker's own history via forward-fill. |
| sanity_rules = [ |
| ("stmt_revenue", "< 0", lambda s: s < 0), |
| ("stmt_revenue_ttm", "< 0", lambda s: s < 0), |
| ("stmt_total_assets", "<= 0", lambda s: s <= 0), |
| ("stmt_total_liabilities", "< 0", lambda s: s < 0), |
| ] |
| panel = panel.sort_values(["ticker", "date"]) |
| for col, rule_name, rule_fn in sanity_rules: |
| if col not in panel.columns: |
| continue |
| bad = rule_fn(panel[col]) & panel[col].notna() |
| if not bad.any(): |
| continue |
| n_bad = int(bad.sum()) |
| |
| panel.loc[bad, col] = np.nan |
| panel[col] = panel.groupby("ticker")[col].ffill() |
| |
| still_bad = rule_fn(panel[col]) & panel[col].notna() |
| if still_bad.any(): |
| panel.loc[still_bad, col] = np.nan |
| remaining = panel[col].isna().sum() |
| logger.info("Sanity fix %s %s: %d bad values recovered via per-ticker forward-fill (final nulls: %d)", |
| col, rule_name, n_bad, remaining) |
|
|
| # Final balance-equation purge on the merged panel: the as-of join plus |
| # per-column ffill can pair A/L/E values from different filings, so re-verify |
| # A = L + E and, where it fails, copy the ticker's last self-consistent |
| # (A, L, E) triple forward; rows with no prior good triple are nulled. |
| ble_cols = ["stmt_total_assets", "stmt_total_liabilities", "stmt_total_equity"] |
| if all(c in panel.columns for c in ble_cols): |
| |
| |
| |
| |
| panel = panel.sort_values(["ticker", "date"]).reset_index(drop=True) |
|
|
| A = panel["stmt_total_assets"] |
| L = panel["stmt_total_liabilities"] |
| E = panel["stmt_total_equity"] |
| all_present = A.notna() & L.notna() & E.notna() |
| rel_err = ((A - L.fillna(0) - E.fillna(0)).abs() / A.abs().replace(0, np.nan)) |
| good = all_present & (rel_err <= 0.01) |
| n_bad_initial = int((all_present & ~good).sum()) |
|
| # Vectorised "copy the last good row" trick: forward-fill the row index of |
| # good rows within each ticker, then gather A/L/E from those source positions. |
| idx_series = pd.Series(panel.index.to_numpy(), index=panel.index) |
| good_idx = idx_series.where(good) |
| last_good = good_idx.groupby(panel["ticker"]).ffill() |
|
|
| fill_mask = (~good) & last_good.notna() |
| if fill_mask.any(): |
| src_idx = last_good[fill_mask].astype(int).to_numpy() |
| dst_idx = panel.index[fill_mask].to_numpy() |
| for c in ble_cols: |
| panel.loc[dst_idx, c] = panel[c].to_numpy()[src_idx] |
|
|
| orphan_mask = (~good) & last_good.isna() |
| if orphan_mask.any(): |
| panel.loc[orphan_mask, ble_cols] = np.nan |
|
|
| |
| A2 = panel["stmt_total_assets"] |
| L2 = panel["stmt_total_liabilities"] |
| E2 = panel["stmt_total_equity"] |
| all2 = A2.notna() & L2.notna() & E2.notna() |
| rel2 = ((A2 - L2.fillna(0) - E2.fillna(0)).abs() |
| / A2.abs().replace(0, np.nan)) |
| residual = int((all2 & (rel2 > 0.01)).sum()) |
| logger.info( |
| "Balance-eq residual purge: %d bad → %d filled / %d orphan-nulled" |
| " / %d residual (post-fix verify)", |
| n_bad_initial, int(fill_mask.sum()), int(orphan_mask.sum()), residual, |
| ) |
| panel = panel.reset_index(drop=True) |
| else: |
| logger.warning("No statement financials loaded.") |
|
|
| # Macro joins on date alone: identical values for every ticker on a given date. |
| if not macro.empty: |
| macro = macro.sort_values("date").reset_index(drop=True) |
| macro_cols = [c for c in macro.columns if c != "date"] |
| macro[macro_cols] = macro[macro_cols].ffill() |
| panel = panel.sort_values("date").reset_index(drop=True) |
| panel = pd.merge_asof(panel, macro, on="date", direction="backward") |
| logger.info("Merged macro data (%d series).", len(macro_cols)) |
| else: |
| logger.warning("No macro data loaded.") |
|
|
| # Filing context: most recent filing as-of each (ticker, date). |
| filing_lookup = _load_filing_metadata(tickers) |
| tickers_with_filings = sum(1 for v in filing_lookup.values() if v) |
| if tickers_with_filings > 0: |
| panel = _attach_nearest_filing(panel, filing_lookup) |
| logger.info("Attached filing context (%d tickers have filings).", tickers_with_filings) |
| else: |
| logger.warning("No filings found for any ticker.") |
| panel["nearest_filing_type"] = None |
| panel["nearest_filing_date"] = pd.NaT |
| panel["nearest_filing_path"] = None |
| panel["days_since_filing"] = np.nan |
|
|
| |
| |
| |
| |
| |
|
|
| # 2d. Derive shares outstanding and the valuation metrics built on them. |
| logger.info("Deriving time-varying metrics ...") |
| panel["shares_outstanding"] = _derive_shares_outstanding(panel, company_info) |
| panel = _compute_derived_metrics(panel, granularity=granularity) |
|
|
| |
| |
| |
| |
| |
| |
|
|
| # Label each row by universe bucket for downstream task splits. |
| panel["label"] = "other" |
| if "lower_end_russell2000" in panel.columns: |
| panel.loc[panel["lower_end_russell2000"] == True, "label"] = "lower_end_r2k" |
| if "small_cap_outside" in panel.columns: |
| panel.loc[panel["small_cap_outside"] == True, "label"] = "small_cap_outside" |
|
|
| # 2e. Final sort, then save the panel and its column-role map. |
| panel = panel.sort_values(["ticker", "date"]).reset_index(drop=True) |
|
|
| |
| panel.to_parquet(out_dir / "panel.parquet", index=False) |
|
|
| col_roles = _build_column_roles(list(panel.columns)) |
| (out_dir / "columns.json").write_text(json.dumps(col_roles, indent=2)) |
|
|
| logger.info( |
| "Panel saved: %d rows, %d tickers, %d columns at %s granularity. -> %s", |
| len(panel), panel["ticker"].nunique(), len(panel.columns), |
| granularity, out_dir / "panel.parquet", |
| ) |
| return panel |
|
|