diff --git "a/sec_parser/sec_parser.py" "b/sec_parser/sec_parser.py" new file mode 100644--- /dev/null +++ "b/sec_parser/sec_parser.py" @@ -0,0 +1,15384 @@ +#!/usr/bin/env python + +import sys +import re +import pathlib +import textwrap +import pandas as pd +import numpy as np +import math +import io +from bs4 import BeautifulSoup, NavigableString, UnicodeDammit, Tag +import traceback +import datetime +from typing import Any, Dict, List, Optional, Tuple +import argparse +from urllib.parse import quote, unquote, urljoin +import html +import itertools +import unicodedata +import binascii +import logging +import hashlib +import fcntl + +try: + from .hardcodes import apply_markdown_hardcodes +except ImportError: + from hardcodes import apply_markdown_hardcodes + +from collections import defaultdict +from statistics import median + +import os, io, json, time, traceback, requests + +from dotenv import load_dotenv +SEC_PARSER_DOTENV_PATH = pathlib.Path(__file__).resolve().parent / ".env" + + +def _load_sec_parser_env() -> None: + load_dotenv() + if SEC_PARSER_DOTENV_PATH.exists(): + load_dotenv(SEC_PARSER_DOTENV_PATH, override=False) + + +_load_sec_parser_env() +from pydantic import BaseModel, Field, ConfigDict, ValidationError +from PyPDF2 import PdfReader, PdfWriter +from PyPDF2.errors import PdfReadError +from mistralai import Mistral, DocumentURLChunk +from mistralai.extra import response_format_from_pydantic_model + +try: + from .special_chars import WINGDINGS_MAP +except ImportError: + from special_chars import WINGDINGS_MAP + +import random +from mistralai.models.sdkerror import SDKError + +import fitz +import base64 + +import imgkit +from playwright.sync_api import sync_playwright + +try: + from .config import Config +except ImportError: + from config import Config + +log_file_path = pathlib.Path(__file__).parent / 'sec_parser_errors.log' +ocr_log_file_path = pathlib.Path(__file__).resolve().parent.parent / 'pdf_files.log' +parse_stats_log_file_path = pathlib.Path(__file__).parent / 'sec_parser_parse_stats.jsonl' +parse_stats_summary_file_path = pathlib.Path(__file__).parent / 'sec_parser_parse_stats_summary.json' + +logger = logging.getLogger() +logger.setLevel(logging.ERROR) + +handler = logging.FileHandler(log_file_path, mode='a', encoding='utf-8') +formatter = logging.Formatter('%(asctime)s\n%(message)s\n' + '-'*80, datefmt='%Y-%m-%d %H:%M:%S') + +handler.setFormatter(formatter) +logger.addHandler(handler) + +ocr_logger = logging.getLogger("ocr_tracker") +ocr_logger.setLevel(logging.INFO) +ocr_logger.propagate = False + +ocr_handler = logging.FileHandler(ocr_log_file_path, mode='a', encoding='utf-8') +ocr_handler.setFormatter(formatter) +ocr_logger.addHandler(ocr_handler) + +DEFAULT_FONT = 16.0 + +CURRENT_PROCESSING_FILE = "Unknown" +CURRENT_OCR_LOGGED_FILINGS = set() +CURRENT_SOURCE_DOCUMENT_URL = None +LAST_PARSE_STATS = None +LAST_POSITIONED_HTML_OCR_PAGE_COUNT = 0 +_TIKTOKEN_ENCODING = None + + +def _is_debug_enabled() -> bool: + return os.getenv("SEC_PARSER_DEBUG", "0").strip().lower() in {"1", "true", "yes", "on"} + + +def _debug_print(*args, **kwargs) -> None: + if _is_debug_enabled(): + print(*args, **kwargs) + + +def estimate_parser_tokens(text: str) -> int: + """Count output tokens with tiktoken when available, falling back to a stable approximation.""" + global _TIKTOKEN_ENCODING + text = text or "" + if not text: + return 0 + if _TIKTOKEN_ENCODING is None: + try: + import tiktoken + + _TIKTOKEN_ENCODING = tiktoken.get_encoding("cl100k_base") + except Exception: + 
_TIKTOKEN_ENCODING = False + if _TIKTOKEN_ENCODING: + try: + return len(_TIKTOKEN_ENCODING.encode(text)) + except Exception: + pass + return max(1, len(re.findall(r"\w+|[^\w\s]", text, flags=re.UNICODE))) + + +def normalize_form_type_for_stats(form_type: str) -> str: + form = (form_type or "unknown").strip().upper() + if not form: + return "unknown" + base = form[:-2] if form.endswith("/A") else form + compact = re.sub(r"[^A-Z0-9]", "", base) + if compact in {"8K", "10K", "10Q"}: + return compact.lower() + if compact.startswith("NPORT"): + return "nport" + if compact.startswith("NCEN"): + return "ncen" + if compact.startswith("NPX"): + return "npx" + if compact.startswith("NMFP"): + return "nmfp" + if compact.startswith("13F"): + return "13f" + if compact in {"3", "4", "5"}: + return compact + return compact.lower() or "unknown" + + +def _pct(value: int, total: int) -> float: + if total <= 0: + return 0.0 + return round((float(value) / float(total)) * 100.0, 4) + + +def _new_parse_stats(filepath: pathlib.Path) -> Dict[str, Any]: + return { + "input_path": str(filepath), + "source_document_url": CURRENT_SOURCE_DOCUMENT_URL, + "accession_number": _current_filing_accession_number(), + "form_type": "", + "form_category": "unknown", + "format_token_counts": {bucket: 0 for bucket in ("sgml", "html", "xml", "pdf", "text", "other")}, + "format_char_counts": {bucket: 0 for bucket in ("sgml", "html", "xml", "pdf", "text", "other")}, + "format_section_counts": {bucket: 0 for bucket in ("sgml", "html", "xml", "pdf", "text", "other")}, + "pdf_page_count": 0, + "parts": [], + } + + +def _record_parse_stats_part(stats: Optional[Dict[str, Any]], source_format: str, text: str, label: str = "") -> None: + if not stats or not text or not str(text).strip(): + return + bucket = (source_format or "other").strip().lower() + if bucket not in stats["format_token_counts"]: + bucket = "other" + token_count = estimate_parser_tokens(text) + char_count = len(text) + stats["format_token_counts"][bucket] += token_count + stats["format_char_counts"][bucket] += char_count + stats["format_section_counts"][bucket] += 1 + stats["parts"].append( + { + "source_format": bucket, + "label": label or bucket, + "token_count": token_count, + "char_count": char_count, + } + ) + + +def _record_parse_stats_pdf_pages(stats: Optional[Dict[str, Any]], page_count: int) -> None: + if not stats: + return + stats["pdf_page_count"] = int(stats.get("pdf_page_count") or 0) + max(0, int(page_count or 0)) + + +def _finalize_parse_stats(stats: Optional[Dict[str, Any]], document_text: str, form_type: str) -> Dict[str, Any]: + stats = stats or _new_parse_stats(pathlib.Path(CURRENT_PROCESSING_FILE or "unknown")) + form_type = (form_type or stats.get("form_type") or "unknown").strip().upper() or "unknown" + stats["form_type"] = form_type + stats["form_category"] = normalize_form_type_for_stats(form_type) + source_total = sum(int(v) for v in stats.get("format_token_counts", {}).values()) + stats["source_token_count"] = source_total + stats["source_char_count"] = sum(int(v) for v in stats.get("format_char_counts", {}).values()) + stats["format_token_percentages"] = { + key: _pct(int(value), source_total) + for key, value in sorted(stats.get("format_token_counts", {}).items()) + } + pdf_page_count = int(stats.get("pdf_page_count") or 0) + pdf_output_token_count = int((stats.get("format_token_counts") or {}).get("pdf") or 0) + stats["pdf_page_count"] = pdf_page_count + stats["pdf_output_token_count"] = pdf_output_token_count + 
stats["pdf_output_tokens_per_page"] = ( + round(float(pdf_output_token_count) / float(pdf_page_count), 4) + if pdf_page_count > 0 + else None + ) + stats["initial_document_token_count"] = estimate_parser_tokens(document_text or "") + stats["initial_document_char_count"] = len(document_text or "") + return stats + + +def _complete_parse_stats_for_output( + stats: Optional[Dict[str, Any]], + *, + output_path: pathlib.Path, + final_markdown: str, + to_mmd: bool, +) -> Dict[str, Any]: + stats = dict(stats or _new_parse_stats(pathlib.Path(CURRENT_PROCESSING_FILE or "unknown"))) + stats["output_path"] = str(output_path) + stats["to_mmd"] = bool(to_mmd) + stats["final_token_count"] = estimate_parser_tokens(final_markdown or "") + stats["final_char_count"] = len(final_markdown or "") + return stats + + +def _write_parse_stats_outputs(stats: Dict[str, Any], output_path: pathlib.Path) -> None: + sidecar_path = output_path.with_suffix(".parse_stats.json") + sidecar_path.write_text(json.dumps(stats, indent=2, sort_keys=True), encoding="utf-8") + if os.getenv("SEC_PARSER_DISABLE_GLOBAL_STATS", "").strip().lower() in {"1", "true", "yes", "on"}: + return + with parse_stats_log_file_path.open("a", encoding="utf-8") as handle: + handle.write(json.dumps(stats, sort_keys=True) + "\n") + _refresh_parse_stats_summary() + +_PARSE_STATS_SUMMARY_CACHE: Dict[str, Any] = { + "path": "", + "size": 0, + "latest_by_input": {}, +} + +def _refresh_parse_stats_summary() -> None: + latest_by_input: Dict[str, Dict[str, Any]] + log_path = parse_stats_log_file_path + cache_path = str(log_path) + current_size = log_path.stat().st_size if log_path.exists() else 0 + cached_path = str(_PARSE_STATS_SUMMARY_CACHE.get("path") or "") + cached_size = int(_PARSE_STATS_SUMMARY_CACHE.get("size") or 0) + if cache_path == cached_path and current_size >= cached_size: + latest_by_input = dict(_PARSE_STATS_SUMMARY_CACHE.get("latest_by_input") or {}) + read_from = cached_size + else: + latest_by_input = {} + read_from = 0 + + if log_path.exists(): + with log_path.open("r", encoding="utf-8", errors="replace") as handle: + if read_from: + handle.seek(read_from) + for line in handle: + if not line.strip(): + continue + try: + record = json.loads(line) + except Exception: + continue + key = str(record.get("input_path") or record.get("accession_number") or record.get("output_path") or "") + if key: + latest_by_input[key] = record + _PARSE_STATS_SUMMARY_CACHE["path"] = cache_path + _PARSE_STATS_SUMMARY_CACHE["size"] = current_size + _PARSE_STATS_SUMMARY_CACHE["latest_by_input"] = dict(latest_by_input) + else: + _PARSE_STATS_SUMMARY_CACHE["path"] = cache_path + _PARSE_STATS_SUMMARY_CACHE["size"] = 0 + _PARSE_STATS_SUMMARY_CACHE["latest_by_input"] = {} + records = list(latest_by_input.values()) + total_filings = len(records) + form_counts: Dict[str, int] = defaultdict(int) + form_token_counts: Dict[str, int] = defaultdict(int) + format_token_counts: Dict[str, int] = defaultdict(int) + total_pdf_page_count = 0 + total_pdf_output_token_count = 0 + total_tokens = 0 + pdf_output_tokens_per_page_values: List[float] = [] + for record in records: + form = str(record.get("form_category") or normalize_form_type_for_stats(str(record.get("form_type") or ""))) + tokens = int(record.get("final_token_count") or record.get("initial_document_token_count") or 0) + form_counts[form] += 1 + form_token_counts[form] += tokens + total_tokens += tokens + pdf_page_count = int(record.get("pdf_page_count") or 0) + pdf_output_token_count = int( + 
record.get("pdf_output_token_count") + or (record.get("format_token_counts") or {}).get("pdf") + or 0 + ) + total_pdf_page_count += pdf_page_count + total_pdf_output_token_count += pdf_output_token_count + pdf_output_tokens_per_page = record.get("pdf_output_tokens_per_page") + if pdf_output_tokens_per_page is None and pdf_page_count > 0: + pdf_output_tokens_per_page = round( + float(pdf_output_token_count) / float(pdf_page_count), + 4, + ) + if pdf_output_tokens_per_page is not None: + pdf_output_tokens_per_page_values.append(float(pdf_output_tokens_per_page)) + for bucket, value in (record.get("format_token_counts") or {}).items(): + format_token_counts[str(bucket)] += int(value or 0) + median_pdf_output_tokens_per_page = None + if pdf_output_tokens_per_page_values: + sorted_values = sorted(pdf_output_tokens_per_page_values) + midpoint = len(sorted_values) // 2 + median_pdf_output_tokens_per_page = ( + sorted_values[midpoint] + if len(sorted_values) % 2 == 1 + else round((sorted_values[midpoint - 1] + sorted_values[midpoint]) / 2.0, 4) + ) + summary = { + "deduped_filing_count": total_filings, + "total_final_token_count": total_tokens, + "total_pdf_page_count": total_pdf_page_count, + "total_pdf_output_token_count": total_pdf_output_token_count, + "pdf_output_tokens_per_page": ( + round(float(total_pdf_output_token_count) / float(total_pdf_page_count), 4) + if total_pdf_page_count > 0 + else None + ), + "median_pdf_output_tokens_per_page": median_pdf_output_tokens_per_page, + "form_type_counts": dict(sorted(form_counts.items())), + "form_type_count_percentages": { + key: _pct(value, total_filings) + for key, value in sorted(form_counts.items()) + }, + "form_type_token_percentages": { + key: _pct(value, total_tokens) + for key, value in sorted(form_token_counts.items()) + }, + "format_token_counts": dict(sorted(format_token_counts.items())), + "format_token_percentages": { + key: _pct(value, sum(format_token_counts.values())) + for key, value in sorted(format_token_counts.items()) + }, + } + parse_stats_summary_file_path.write_text(json.dumps(summary, indent=2, sort_keys=True), encoding="utf-8") + + +def _print_parse_stats_summary(stats: Dict[str, Any]) -> None: + final_tokens = int(stats.get("final_token_count") or stats.get("initial_document_token_count") or 0) + form = stats.get("form_category") or stats.get("form_type") or "unknown" + percentages = stats.get("format_token_percentages") or {} + format_text = ", ".join( + f"{bucket}={percentages.get(bucket, 0):.1f}%" + for bucket in ("html", "xml", "sgml", "pdf", "text", "other") + if percentages.get(bucket, 0) > 0 + ) or "none" + print(f"[parse-stats] tokens={final_tokens:,} | form={form} | formats: {format_text}") + + +def _current_filing_accession_number() -> Optional[str]: + current_path = CURRENT_PROCESSING_FILE + if not current_path or current_path == "Unknown": + return None + + current_name = pathlib.Path(current_path).name + match = re.search(r"\b\d{10}-\d{2}-\d{6}\b", current_name) + if match: + return match.group(0) + + stem = pathlib.Path(current_path).stem.strip() + return stem or None + +def _log_current_filing_ocr(ocr_reason: str) -> None: + accession_number = _current_filing_accession_number() + if not accession_number or accession_number in CURRENT_OCR_LOGGED_FILINGS: + return + + filing_name = pathlib.Path(CURRENT_PROCESSING_FILE).name + CURRENT_OCR_LOGGED_FILINGS.add(accession_number) + ocr_logger.info( + f"ACCESSION: {accession_number}\n" + f"FILING: {filing_name}\n" + f"OCR_REASON: {ocr_reason}" + ) + 
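+
+# Illustrative composition of the parse-stats helpers above. Hypothetical
+# values, not part of the module's call graph:
+#
+#   stats = _new_parse_stats(pathlib.Path("0001234567-24-000123.txt"))
+#   _record_parse_stats_part(stats, "html", "Item 1.01 ...", label="body")
+#   _record_parse_stats_pdf_pages(stats, 3)
+#   stats = _finalize_parse_stats(stats, document_text="# 8-K ...", form_type="8-K")
+#   assert stats["form_category"] == "8k"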
+CP1252_CTRL_TO_UNICODE = str.maketrans({ + "\x80": "\u20AC", "\x82": "\u201A", "\x83": "\u0192", "\x84": "\u201E", + "\x85": "\u2026", "\x86": "\u2020", "\x87": "\u2021", "\x88": "\u02C6", + "\x89": "\u2030", "\x8A": "\u0160", "\x8B": "\u2039", "\x8C": "\u0152", + "\x8E": "\u017D", "\x91": "\u2018", "\x92": "\u2019", "\x93": "\u201C", + "\x94": "\u201D", "\x95": "\u2022", "\x96": "\u2013", "\x97": "\u2014", + "\x98": "\u02DC", "\x99": "\u2122", "\x9A": "\u0161", "\x9B": "\u203A", + "\x9C": "\u0153", "\x9E": "\u017E", "\x9F": "\u0178", +}) + +PUNCT_CANON = str.maketrans({ + "\u2018": "'", "\u2019": "'", "\uFF07": "'", "\u2032": "'", + "\u2010": "-", "\u2011": "-", "\u2212": "-", +}) + +OCR_API_URL = "https://api.mistral.ai/v1/ocr" +MISTRAL_KEY_STATUS_PATH = pathlib.Path( + os.getenv("SEC_PARSER_MISTRAL_KEY_STATUS_PATH", "").strip() + or (pathlib.Path(__file__).parent / "mistral_key_status.json") +) +MISTRAL_KEY_LOCK_PATH = MISTRAL_KEY_STATUS_PATH.with_suffix(MISTRAL_KEY_STATUS_PATH.suffix + ".lock") +MISTRAL_KEY_ENV_LIMIT = 20 +MISTRAL_KEY_STATE_VERSION = 1 +MISTRAL_MONTHLY_TOKEN_BUDGET_ESTIMATE = 1_000_000_000 +MISTRAL_QUOTA_ERROR_HINTS = ( + "quota", + "credit", + "credits", + "billing", + "monthly", + "free tier", + "experiment plan", + "token limit", + "tokens per month", + "out of tokens", + "usage limit", + "insufficient", +) +MISTRAL_INVALID_KEY_HINTS = ( + "invalid api key", + "incorrect api key", + "authentication", + "unauthorized", + "forbidden", + "revoked", +) + + +def _utc_now_iso() -> str: + return datetime.datetime.now(datetime.timezone.utc).isoformat() + + +def _current_usage_month_label() -> str: + return datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m") + + +def _parse_iso_timestamp(value: Optional[str]) -> Optional[float]: + if not value: + return None + try: + return datetime.datetime.fromisoformat(value).timestamp() + except Exception: + return None + + +def _mistral_key_fingerprint(api_key: str) -> str: + digest = hashlib.sha256((api_key or "").encode("utf-8")).hexdigest() + return digest[:12] + + +def _mistral_env_name_for_index(index: int) -> str: + return "MISTRAL_API_KEY" if index == 1 else f"MISTRAL_API_KEY{index}" + + +def _configured_mistral_key_specs(explicit_api_key: Optional[str] = None) -> List[Dict[str, Any]]: + specs: List[Dict[str, Any]] = [] + seen_keys = set() + for idx in range(1, MISTRAL_KEY_ENV_LIMIT + 1): + env_name = _mistral_env_name_for_index(idx) + api_key = (os.getenv(env_name) or "").strip() + if not api_key or api_key in seen_keys: + continue + seen_keys.add(api_key) + specs.append( + { + "env_name": env_name, + "ordinal": idx, + "api_key": api_key, + "fingerprint": _mistral_key_fingerprint(api_key), + } + ) + + explicit_api_key = (explicit_api_key or "").strip() + if explicit_api_key and explicit_api_key not in seen_keys: + specs.insert( + 0, + { + "env_name": "MISTRAL_API_KEY", + "ordinal": 1, + "api_key": explicit_api_key, + "fingerprint": _mistral_key_fingerprint(explicit_api_key), + }, + ) + return specs + + +def _has_mistral_api_keys(explicit_api_key: Optional[str] = None) -> bool: + return bool(_configured_mistral_key_specs(explicit_api_key=explicit_api_key)) + + +def _mistral_no_keys_message() -> str: + return "No Mistral API keys found in environment variables MISTRAL_API_KEY through MISTRAL_API_KEY20." 
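+
+# Key discovery sketch (hypothetical values): keys are read from
+# MISTRAL_API_KEY, MISTRAL_API_KEY2, ..., MISTRAL_API_KEY20 in ordinal order,
+# skipping blanks and duplicates:
+#
+#   os.environ["MISTRAL_API_KEY"] = "key-a"
+#   os.environ["MISTRAL_API_KEY3"] = "key-b"
+#   [spec["env_name"] for spec in _configured_mistral_key_specs()]
+#   # -> ["MISTRAL_API_KEY", "MISTRAL_API_KEY3"]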
+ + +def _default_mistral_key_record(spec: Dict[str, Any]) -> Dict[str, Any]: + return { + "env_name": spec["env_name"], + "ordinal": int(spec["ordinal"]), + "fingerprint": spec["fingerprint"], + "status": "available", + "request_count": 0, + "success_count": 0, + "pages_processed": 0, + "doc_size_bytes": 0, + "estimated_output_tokens": 0, + "last_selected_at": None, + "last_success_at": None, + "last_error_at": None, + "last_error": None, + "last_error_status_code": None, + "last_rate_limit_headers": {}, + "cooldown_until": None, + "exhausted_at": None, + } + + +def _normalize_mistral_key_state(raw_state: Optional[Dict[str, Any]], key_specs: List[Dict[str, Any]]) -> Dict[str, Any]: + state = dict(raw_state or {}) + state_usage_month = str(state.get("usage_month") or "").strip() + current_usage_month = _current_usage_month_label() + monthly_reset = state_usage_month != current_usage_month + keys_state = {} if monthly_reset else dict(state.get("keys") or {}) + normalized_keys: Dict[str, Dict[str, Any]] = {} + for spec in key_specs: + record = dict(keys_state.get(spec["env_name"]) or {}) + default_record = _default_mistral_key_record(spec) + default_record.update(record) + default_record["env_name"] = spec["env_name"] + default_record["ordinal"] = int(spec["ordinal"]) + default_record["fingerprint"] = spec["fingerprint"] + normalized_keys[spec["env_name"]] = default_record + + state = { + "version": MISTRAL_KEY_STATE_VERSION, + "usage_month": current_usage_month, + "updated_at": _utc_now_iso(), + "active_env_name": state.get("active_env_name"), + "keys": normalized_keys, + } + if state["active_env_name"] not in normalized_keys: + state["active_env_name"] = None + return state + + +def _read_mistral_key_state_unlocked(key_specs: List[Dict[str, Any]]) -> Dict[str, Any]: + if MISTRAL_KEY_STATUS_PATH.exists(): + try: + raw_state = json.loads(MISTRAL_KEY_STATUS_PATH.read_text(encoding="utf-8")) + except Exception: + raw_state = {} + else: + raw_state = {} + return _normalize_mistral_key_state(raw_state, key_specs) + + +def _write_mistral_key_state_unlocked(state: Dict[str, Any]) -> None: + MISTRAL_KEY_STATUS_PATH.parent.mkdir(parents=True, exist_ok=True) + tmp_path = MISTRAL_KEY_STATUS_PATH.with_suffix(MISTRAL_KEY_STATUS_PATH.suffix + ".tmp") + tmp_path.write_text(json.dumps(state, indent=2, sort_keys=True), encoding="utf-8") + tmp_path.replace(MISTRAL_KEY_STATUS_PATH) + + +class _LockedMistralKeyState: + def __init__(self, key_specs: List[Dict[str, Any]]): + self.key_specs = key_specs + self.handle = None + self.state = None + + def __enter__(self) -> Dict[str, Any]: + MISTRAL_KEY_LOCK_PATH.parent.mkdir(parents=True, exist_ok=True) + self.handle = open(MISTRAL_KEY_LOCK_PATH, "a+", encoding="utf-8") + fcntl.flock(self.handle.fileno(), fcntl.LOCK_EX) + self.state = _read_mistral_key_state_unlocked(self.key_specs) + return self.state + + def __exit__(self, exc_type, exc, tb) -> None: + try: + if self.state is not None: + self.state["updated_at"] = _utc_now_iso() + _write_mistral_key_state_unlocked(self.state) + finally: + if self.handle is not None: + fcntl.flock(self.handle.fileno(), fcntl.LOCK_UN) + self.handle.close() + + +def _locked_mistral_key_state(key_specs: List[Dict[str, Any]]) -> _LockedMistralKeyState: + return _LockedMistralKeyState(key_specs) + + +def _select_mistral_key_spec(explicit_api_key: Optional[str] = None) -> Dict[str, Any]: + key_specs = _configured_mistral_key_specs(explicit_api_key=explicit_api_key) + if not key_specs: + raise RuntimeError(_mistral_no_keys_message()) + + 
lookup = {spec["env_name"]: spec for spec in key_specs} + now_ts = time.time() + with _locked_mistral_key_state(key_specs) as state: + available: List[Tuple[int, Dict[str, Any]]] = [] + cooling: List[Tuple[float, int, Dict[str, Any]]] = [] + + for spec in key_specs: + record = state["keys"][spec["env_name"]] + if record.get("status") in {"exhausted", "invalid"}: + continue + cooldown_until_ts = _parse_iso_timestamp(record.get("cooldown_until")) + if cooldown_until_ts and cooldown_until_ts > now_ts: + cooling.append((cooldown_until_ts, spec["ordinal"], record)) + else: + available.append((spec["ordinal"], record)) + + chosen_record: Optional[Dict[str, Any]] = None + if available: + chosen_record = sorted(available, key=lambda item: item[0])[0][1] + elif cooling: + chosen_record = sorted(cooling, key=lambda item: (item[0], item[1]))[0][2] + + if not chosen_record: + raise RuntimeError( + "All configured Mistral API keys are exhausted or unavailable; stopping after MISTRAL_API_KEY20." + ) + + chosen_record["last_selected_at"] = _utc_now_iso() + chosen_record["request_count"] = int(chosen_record.get("request_count") or 0) + 1 + state["active_env_name"] = chosen_record["env_name"] + spec = dict(lookup[chosen_record["env_name"]]) + spec["status"] = chosen_record.get("status") or "available" + return spec + + +def _record_mistral_key_success(env_name: str, usage: Optional[Dict[str, Any]] = None, explicit_api_key: Optional[str] = None) -> None: + key_specs = _configured_mistral_key_specs(explicit_api_key=explicit_api_key) + if not key_specs: + return + usage = dict(usage or {}) + with _locked_mistral_key_state(key_specs) as state: + record = state["keys"].get(env_name) + if not record: + return + record["status"] = "available" + record["cooldown_until"] = None + record["success_count"] = int(record.get("success_count") or 0) + 1 + record["pages_processed"] = int(record.get("pages_processed") or 0) + int(usage.get("pages_processed") or 0) + record["doc_size_bytes"] = int(record.get("doc_size_bytes") or 0) + int(usage.get("doc_size_bytes") or 0) + record["estimated_output_tokens"] = int(record.get("estimated_output_tokens") or 0) + int(usage.get("estimated_output_tokens") or 0) + if usage.get("rate_limit_headers"): + record["last_rate_limit_headers"] = dict(usage["rate_limit_headers"]) + record["last_success_at"] = _utc_now_iso() + + +def _record_mistral_key_terminal_error( + env_name: str, + *, + status: str, + message: str, + status_code: Optional[int], + explicit_api_key: Optional[str] = None, +) -> None: + key_specs = _configured_mistral_key_specs(explicit_api_key=explicit_api_key) + if not key_specs: + return + now_iso = _utc_now_iso() + with _locked_mistral_key_state(key_specs) as state: + record = state["keys"].get(env_name) + if not record: + return + record["status"] = status + record["last_error"] = (message or "")[:1000] + record["last_error_status_code"] = status_code + record["last_error_at"] = now_iso + record["cooldown_until"] = None + if status == "exhausted": + record["exhausted_at"] = now_iso + + +def _record_mistral_key_cooldown( + env_name: str, + *, + message: str, + status_code: Optional[int], + cooldown_seconds: float, + explicit_api_key: Optional[str] = None, +) -> None: + key_specs = _configured_mistral_key_specs(explicit_api_key=explicit_api_key) + if not key_specs: + return + cooldown_until = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(seconds=max(1.0, float(cooldown_seconds))) + with _locked_mistral_key_state(key_specs) as state: + record = 
state["keys"].get(env_name) + if not record: + return + record["status"] = "available" + record["cooldown_until"] = cooldown_until.isoformat() + record["last_error"] = (message or "")[:1000] + record["last_error_status_code"] = status_code + record["last_error_at"] = _utc_now_iso() + + +def _extract_rate_limit_headers(headers: Optional[Dict[str, Any]]) -> Dict[str, Any]: + out: Dict[str, Any] = {} + for key, value in dict(headers or {}).items(): + key_str = str(key) + if key_str.lower().startswith("x-ratelimit") or key_str.lower() == "retry-after": + out[key_str] = value + return out + + +def _extract_retry_after_seconds(headers: Optional[Dict[str, Any]], default_seconds: float = 60.0) -> float: + headers = dict(headers or {}) + raw = headers.get("Retry-After") or headers.get("retry-after") + if raw is None: + return default_seconds + try: + return max(1.0, float(raw)) + except Exception: + return default_seconds + + +def _extract_mistral_error_details(exc: Exception) -> Dict[str, Any]: + status_code = getattr(exc, "status_code", None) + response = getattr(exc, "response", None) + headers: Dict[str, Any] = {} + message_parts = [str(exc)] + + if response is not None: + status_code = getattr(response, "status_code", status_code) + headers = dict(getattr(response, "headers", {}) or {}) + try: + response_text = response.text + except Exception: + response_text = "" + if response_text: + message_parts.append(response_text) + try: + response_json = response.json() + except Exception: + response_json = None + if response_json: + message_parts.append(json.dumps(response_json, ensure_ascii=False)) + + for attr_name in ("body", "response_body", "message"): + attr_value = getattr(exc, attr_name, None) + if attr_value: + if not isinstance(attr_value, str): + try: + attr_value = json.dumps(attr_value, ensure_ascii=False) + except Exception: + attr_value = str(attr_value) + message_parts.append(attr_value) + + message = " ".join(part.strip() for part in message_parts if str(part).strip()) + message = re.sub(r"\s+", " ", message).strip() + return { + "status_code": status_code, + "message": message[:2000], + "rate_limit_headers": _extract_rate_limit_headers(headers), + "retry_after_seconds": _extract_retry_after_seconds(headers), + } + + +def _classify_mistral_exception(exc: Exception) -> Dict[str, Any]: + details = _extract_mistral_error_details(exc) + status_code = details["status_code"] + message = (details["message"] or "").lower() + + if status_code == 429 and any(hint in message for hint in MISTRAL_QUOTA_ERROR_HINTS): + details["action"] = "exhausted" + elif status_code in {402, 403} and any(hint in message for hint in MISTRAL_QUOTA_ERROR_HINTS): + details["action"] = "exhausted" + elif status_code == 401 or any(hint in message for hint in MISTRAL_INVALID_KEY_HINTS): + details["action"] = "invalid" + elif status_code == 403: + details["action"] = "invalid" + elif status_code == 429: + details["action"] = "cooldown" + else: + details["action"] = "raise" + return details + + +def _summarize_ocr_usage(ocr_data: Optional[Dict[str, Any]], response_headers: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: + ocr_data = dict(ocr_data or {}) + usage_info = dict(ocr_data.get("usage_info") or {}) + pages = list(ocr_data.get("pages") or []) + estimated_output_tokens = 0 + for page_obj in pages: + estimated_output_tokens += estimate_parser_tokens( + _normalize_ocr_text((page_obj.get("markdown") or page_obj.get("text") or "").strip()) + ) + pages_processed = usage_info.get("pages_processed") + if 
pages_processed is None: + pages_processed = len(pages) + return { + "pages_processed": int(pages_processed or 0), + "doc_size_bytes": int(usage_info.get("doc_size_bytes") or 0), + "estimated_output_tokens": int(estimated_output_tokens), + "rate_limit_headers": _extract_rate_limit_headers(response_headers), + } + + +def _run_with_mistral_key_rotation( + operation_label: str, + func, + *, + explicit_api_key: Optional[str] = None, +): + key_specs = _configured_mistral_key_specs(explicit_api_key=explicit_api_key) + if not key_specs: + raise RuntimeError(_mistral_no_keys_message()) + + max_attempts = max(1, len(key_specs) * 2) + last_exc: Optional[Exception] = None + + for _ in range(max_attempts): + key_spec = _select_mistral_key_spec(explicit_api_key=explicit_api_key) + key_label = f"{key_spec['env_name']}[{key_spec['fingerprint']}]" + try: + client = Mistral(api_key=key_spec["api_key"]) + return func(client=client, api_key=key_spec["api_key"], key_spec=key_spec) + except Exception as exc: + decision = _classify_mistral_exception(exc) + last_exc = exc + if decision["action"] == "exhausted": + print(f"[mistral] {key_label} exhausted during {operation_label}; switching to the next configured key.") + _record_mistral_key_terminal_error( + key_spec["env_name"], + status="exhausted", + message=decision["message"], + status_code=decision["status_code"], + explicit_api_key=explicit_api_key, + ) + continue + if decision["action"] == "invalid": + print(f"[mistral] {key_label} was rejected during {operation_label}; switching to the next configured key.") + _record_mistral_key_terminal_error( + key_spec["env_name"], + status="invalid", + message=decision["message"], + status_code=decision["status_code"], + explicit_api_key=explicit_api_key, + ) + continue + if decision["action"] == "cooldown" and len(key_specs) > 1: + cooldown_seconds = float(decision.get("retry_after_seconds") or 60.0) + print( + f"[mistral] {key_label} hit a rate limit during {operation_label}; cooling it down for " + f"{cooldown_seconds:.0f}s and trying the next key." + ) + _record_mistral_key_cooldown( + key_spec["env_name"], + message=decision["message"], + status_code=decision["status_code"], + cooldown_seconds=cooldown_seconds, + explicit_api_key=explicit_api_key, + ) + continue + raise + + raise RuntimeError( + "All configured Mistral API keys are exhausted or unavailable; stopping after MISTRAL_API_KEY20." 
+    ) from last_exc
+
+
+def get_mistral_key_status_snapshot(explicit_api_key: Optional[str] = None) -> Dict[str, Any]:
+    key_specs = _configured_mistral_key_specs(explicit_api_key=explicit_api_key)
+    with _locked_mistral_key_state(key_specs) as state:
+        snapshot = json.loads(json.dumps(state))
+    now_ts = time.time()
+    snapshot["status_path"] = str(MISTRAL_KEY_STATUS_PATH)
+    snapshot["configured_key_count"] = len(key_specs)
+    snapshot["available_key_count"] = 0
+    snapshot["exhausted_key_count"] = 0
+
+    for record in snapshot.get("keys", {}).values():
+        cooldown_until_ts = _parse_iso_timestamp(record.get("cooldown_until"))
+        record["cooldown_remaining_s"] = (
+            max(0.0, round(cooldown_until_ts - now_ts, 3)) if cooldown_until_ts and cooldown_until_ts > now_ts else 0.0
+        )
+        if record.get("status") == "exhausted":
+            snapshot["exhausted_key_count"] += 1
+        elif record.get("status") != "invalid":
+            snapshot["available_key_count"] += 1
+    return snapshot
+
+
+def reset_mistral_key_status(explicit_api_key: Optional[str] = None) -> Dict[str, Any]:
+    key_specs = _configured_mistral_key_specs(explicit_api_key=explicit_api_key)
+    with _locked_mistral_key_state(key_specs) as state:
+        fresh_state = _normalize_mistral_key_state({}, key_specs)
+        state.clear()
+        state.update(fresh_state)
+        snapshot = json.loads(json.dumps(state))
+    snapshot["status_path"] = str(MISTRAL_KEY_STATUS_PATH)
+    return snapshot
+
+
+def _estimate_monthly_mistral_tokens_used(explicit_api_key: Optional[str] = None) -> int:
+    snapshot = get_mistral_key_status_snapshot(explicit_api_key=explicit_api_key)
+    total = 0
+    for record in (snapshot.get("keys") or {}).values():
+        total += int(record.get("estimated_output_tokens") or 0)
+    return total
+
+
+def _print_mistral_monthly_usage(
+    phase: str,
+    file_name: str,
+    *,
+    explicit_api_key: Optional[str] = None,
+) -> None:
+    used_tokens = _estimate_monthly_mistral_tokens_used(explicit_api_key=explicit_api_key)
+    print(
+        f"[mistral-usage] {phase} PDF '{file_name}': "
+        f"{used_tokens:,}/{MISTRAL_MONTHLY_TOKEN_BUDGET_ESTIMATE:,} estimated output tokens used this month"
+    )
+OCR_MODEL = Config.OCR_MODEL
+
+MISTRAL_OCR_HTML_TABLE_PROMPT = (
+    "Extract the document content. Return prose and non-table text as Markdown. "
+    "For every table, return real HTML table markup instead of Markdown tables. "
+    "Use only plain table structure tags such as <table>, <tr>, <td>, and <th>, "
+    "with colspan and rowspan when needed to preserve merged cells. "
+    "Do not wrap HTML tables in markdown code fences. Preserve visible table text, "
+    "row order, column order, punctuation, signs, and numeric formatting."
+)
+
+
+def _mistral_ocr_table_format() -> str:
+    value = os.getenv("MISTRAL_OCR_TABLE_FORMAT", "html").strip().lower()
+    if value in {"html", "markdown"}:
+        return value
+    return "html"
+
+
+def _build_mistral_ocr_payload(signed_url: str) -> dict:
+    table_format = _mistral_ocr_table_format()
+    payload = {
+        "model": OCR_MODEL,
+        "document": {"document_url": signed_url},
+        "table_format": table_format,
+    }
+    if table_format == "html":
+        payload["document_annotation_format"] = {
+            "type": "json_schema",
+            "json_schema": {
+                "name": "ocr_markdown_with_html_tables",
+                "strict": True,
+                "schema": {
+                    "type": "object",
+                    "properties": {
+                        "content": {"type": "string"},
+                    },
+                    "required": ["content"],
+                    "additionalProperties": False,
+                },
+            },
+        }
+        payload["document_annotation_prompt"] = MISTRAL_OCR_HTML_TABLE_PROMPT
+    return payload
+
+
+def _post_mistral_ocr_with_retry(
+    *,
+    headers: Dict[str, str],
+    payload: Dict[str, Any],
+    operation_label: str,
+    timeout_s: int = 600,
+):
+    max_retries = max(1, int(getattr(Config, "API_MAX_RETRIES", 4) or 4))
+    delay = max(0.1, float(getattr(Config, "API_INITIAL_DELAY_SECONDS", 2.0) or 2.0))
+    transient_statuses = {408, 409, 425, 429, 500, 502, 503, 504}
+    last_exc: Optional[Exception] = None
+
+    for attempt in range(1, max_retries + 1):
+        response = None
+        try:
+            response = requests.post(OCR_API_URL, headers=headers, json=payload, timeout=timeout_s)
+            if response.status_code not in transient_statuses:
+                response.raise_for_status()
+                return response
+            response.raise_for_status()
+        except Exception as exc:
+            last_exc = exc
+            status_code = getattr(response, "status_code", None)
+            is_transient = status_code in transient_statuses or isinstance(
+                exc,
+                (
+                    requests.Timeout,
+                    requests.ConnectionError,
+                ),
+            )
+            if not is_transient or attempt >= max_retries:
+                raise
+            retry_after = None
+            if response is not None:
+                retry_after_raw = response.headers.get("Retry-After")
+                try:
+                    retry_after = float(retry_after_raw) if retry_after_raw else None
+                except (TypeError, ValueError):
+                    retry_after = None
+            sleep_s = max(delay, retry_after or 0.0) + random.uniform(0, 0.5)
+            print(
+                f"OCR API transient error during {operation_label}: {exc}. "
+                f"Retrying in {sleep_s:.2f}s... 
(Attempt {attempt}/{max_retries})" + ) + time.sleep(sleep_s) + delay = min(delay * 2, 60.0) + + if last_exc is not None: + raise last_exc + raise RuntimeError(f"OCR API request failed during {operation_label}") + + +def _decode_uu_block_bytes(text: str) -> bytes: + lines = text.splitlines() + begin_idx = next((i for i, line in enumerate(lines) if re.match(r"^begin\s+\d{3}\s+[^\n]+$", line.strip())), None) + if begin_idx is None: + raise ValueError("Could not find a valid 'begin' line in the uuencoded block.") + + decoded = bytearray() + found_end = False + + for line in lines[begin_idx + 1:]: + uu_line = line.rstrip("\r\n") + if uu_line == "end": + found_end = True + break + if uu_line == "": + continue + try: + decoded.extend(binascii.a2b_uu(uu_line.encode("latin-1"))) + except binascii.Error as e: + if "Trailing garbage" not in str(e): + raise ValueError(f"Failed to decode uuencoded block: {e}") from e + + recovered = None + for end in range(len(uu_line) - 1, 0, -1): + try: + candidate = binascii.a2b_uu(uu_line[:end].encode("latin-1")) + recovered = candidate + break + except binascii.Error: + continue + + if recovered is None: + raise ValueError(f"Failed to decode uuencoded block: {e}") from e + + decoded.extend(recovered) + + if not found_end: + raise ValueError("Could not find the terminating 'end' line in the uuencoded block.") + + return bytes(decoded) + +def _extract_uu_block(text: str) -> Tuple[bytes, str]: + """Finds and decodes the first uuencoded block.""" + fname_match = re.search(r"begin\s+\d{3}\s+([^\n]+)", text) + if not fname_match: + raise ValueError("Could not find a valid 'begin' line in the uuencoded block.") + filename = fname_match.group(1).strip() + decoded_bytes = _decode_uu_block_bytes(text) + if not decoded_bytes: + raise ValueError("UU decoding produced no data.") + if decoded_bytes.startswith(b"%PDF-") and b"%%EOF" not in decoded_bytes[-1024:]: + decoded_bytes += b"\n%%EOF\n" + return decoded_bytes, filename + +def _slice_pdf_bytes(pdf_bytes: bytes, first_page: int, last_page: Optional[int] = None) -> bytes: + """Extracts a page range from a PDF bytes object.""" + reader = PdfReader(io.BytesIO(pdf_bytes)) + writer = PdfWriter() + num_pages = len(reader.pages) + start_idx = first_page - 1 + end_idx = num_pages if last_page is None else min(last_page, num_pages) + for i in range(start_idx, end_idx): + writer.add_page(reader.pages[i]) + with io.BytesIO() as buf: + writer.write(buf) + return buf.getvalue() + +def is_page_nearly_blank(page: fitz.Page, threshold: float = 3.5) -> bool: + """Checks if a page is visually blank by analyzing pixel standard deviation.""" + pix = page.get_pixmap(matrix=fitz.Matrix(0.5, 0.5), colorspace=fitz.csGRAY, alpha=False) + img_data = np.frombuffer(pix.samples, dtype=np.uint8) + if img_data.size < 100: return True + std_dev = np.std(img_data) + return std_dev < threshold + +def realign_fixed_width_table(text: str) -> str: + """Parses and perfectly reformats a poorly-aligned fixed-width table.""" + lines = text.strip().split('\n') + if len(lines) < 2: return text + separator_index = next((i for i, line in enumerate(lines) if '--' in line and len(line.replace('-', '').replace(' ', '')) < 5), -1) + if separator_index == -1: return text + separator_line = lines[separator_index] + boundaries = [] + in_dash = False + for i, char in enumerate(separator_line): + if char == '-' and not in_dash: + in_dash = True + start = i + elif char == ' ' and in_dash: + in_dash = False + boundaries.append((start, i)) + if in_dash: boundaries.append((start, 
len(separator_line))) + if not boundaries: return text + rows = [[line[s:e].strip() for s, e in boundaries] for line in lines if line.strip() != separator_line.strip() and any(line[s:e].strip() for s, e in boundaries)] + if not rows: return text + widths = [max(len(cell) for cell in col) for col in zip(*rows)] + realigned = [] + header, body = rows[0], rows[1:] + realigned.append(' '.join(h.ljust(w) for h, w in zip(header, widths))) + realigned.append(' '.join('-' * w for w in widths)) + for row in body: + full_row = row + [''] * (len(widths) - len(row)) + realigned.append(' '.join([full_row[0].ljust(widths[0])] + [c.rjust(w) for c, w in zip(full_row[1:], widths[1:])])) + return '\n'.join(realigned) + +def _ascii_text(s: str) -> str: + s = _normalize_ocr_text(s or "") + s = s.translate(PUNCT_CANON) + s = s.replace("\u201c", '"').replace("\u201d", '"') + s = s.replace("\u2022", "*").replace("\u00b7", "*") + s = unicodedata.normalize("NFKD", s).encode("ascii", "ignore").decode("ascii") + return re.sub(r"\s+", " ", s).strip() + +def is_numeric_like(s: str) -> bool: + s = _ascii_text(s) + if s in {"", "-"}: + return False + s = s.replace(",", "").replace("$", "").replace("%", "") + if s.startswith("(") and s.endswith(")"): + s = "-" + s[1:-1] + try: + float(s) + return True + except ValueError: + return False + +def table_to_fixed_width(table) -> str: + rows = [] + for tr in table.find_all("tr"): + cells = [_ascii_text(c.get_text(" ", strip=True)) for c in tr.find_all(["th", "td"])] + if cells: + rows.append(cells) + + if not rows: + return "" + + col_count = max(len(r) for r in rows) + rows = [r + [""] * (col_count - len(r)) for r in rows] + + aligns = [] + for j in range(col_count): + if j == 0: + aligns.append("left") + continue + + col_vals = [r[j] for r in rows[1:] if r[j].strip()] + numeric_count = sum(is_numeric_like(v) for v in col_vals) + aligns.append("right" if col_vals and numeric_count >= len(col_vals) / 2 else "left") + + widths = [max(len(r[j]) for r in rows) for j in range(col_count)] + + def fmt(row): + out = [] + for j, cell in enumerate(row): + if aligns[j] == "right": + out.append(cell.rjust(widths[j])) + else: + out.append(cell.ljust(widths[j])) + return " ".join(out).rstrip() + + lines = [] + for i, row in enumerate(rows): + lines.append(fmt(row)) + if i == 0: + lines.append(" ".join("-" * w for w in widths).rstrip()) + + return "\n".join(lines) + +def _html_to_fixed_width_ascii(html_fragment: str) -> str: + html_fragment = (html_fragment or "").strip() + if html_fragment.startswith("```"): + html_fragment = re.sub(r"^```(?:html)?\s*", "", html_fragment, flags=re.I) + html_fragment = re.sub(r"\s*```$", "", html_fragment) + + soup = BeautifulSoup(html_fragment, "html.parser") + root = soup.body if soup.body else soup + output_parts: List[str] = [] + + def append_text(text: str): + text = _ascii_text(text) + if not text: + return + m = re.match(r"^\*\*(.+?)\*\*$", text) + if m: + output_parts.append(m.group(1).strip().upper()) + else: + output_parts.append(text) + + def walk(nodes): + for node in nodes: + if isinstance(node, NavigableString): + append_text(str(node)) + continue + + node_name = getattr(node, "name", None) + if node_name == "table": + fixed = table_to_fixed_width(node) + if fixed.strip(): + output_parts.append(fixed) + continue + if node_name == "br": + continue + if getattr(node, "contents", None): + walk(node.contents) + else: + append_text(node.get_text(" ", strip=True)) + + walk(root.contents) + return "\n\n".join(part for part in output_parts if 
part.strip())
+
+_DASH_TRANSLATE = str.maketrans({
+    "\u2010": "-", "\u2011": "-", "\u2012": "-", "\u2013": "-", "\u2014": "-", "\u2015": "-",
+    "\u2212": "-", "\uFE58": "-", "\uFE63": "-", "\uFF0D": "-"
+})
+
+def _normalize_ocr_text(s: str) -> str:
+    s = unicodedata.normalize("NFKC", s or "")
+    s = s.replace("\u00A0", " ").replace("\ufeff", "").replace("\u200b", "").replace("\u200d", "")
+    s = s.translate(_DASH_TRANSLATE)
+    if "\\n" in s: s = s.replace(r"\n", "\n")
+    return s
+
+_CELL_SEP_RE = re.compile(r'^\s*:?\s*-{2,}\s*:?\s*$')
+_MISTRAL_HTML_TABLE_FRAGMENT_RE = re.compile(r"<table[\s\S]*?</table>", re.I)
+
+def _is_separator_line(line: str) -> bool:
+    line = _normalize_ocr_text(line)
+    if '|' not in line: return False
+    core = line.strip()
+    if not core.startswith('|'): return False
+    parts = [p.strip() for p in core.strip('|').split('|')]
+    if not parts or any(p == '' for p in parts): return False
+    return all(_CELL_SEP_RE.match(p or '') for p in parts)
+
+def find_md_table_blocks(text: str) -> List[Tuple[int,int]]:
+    text = _normalize_ocr_text(text)
+    lines = text.splitlines()
+    blocks: List[Tuple[int,int]] = []
+    i, n = 0, len(lines)
+    while i < n - 1:
+        if '|' in lines[i].lstrip() and i + 1 < n and _is_separator_line(lines[i+1]):
+            j, has_body = i + 2, False
+            while j < n and '|' in lines[j].lstrip() and lines[j].strip() != '':
+                has_body = True
+                j += 1
+            if has_body:
+                blocks.append((i, j-1))
+                i = j
+                continue
+        i += 1
+    return blocks
+
+def slice_text_by_blocks(text: str, blocks: List[Tuple[int,int]]) -> List[str]:
+    text = _normalize_ocr_text(text)
+    lines = text.splitlines()
+    return ["\n".join(lines[s:e+1]) for s, e in blocks]
+
+def replace_blocks_with(text: str, blocks: List[Tuple[int,int]], repl_texts: List[str]) -> str:
+    text = _normalize_ocr_text(text)
+    lines = text.splitlines()
+    out, cur, k = [], 0, 0
+    for s, e in blocks:
+        out.extend(lines[cur:s])
+        out.extend(repl_texts[k].splitlines())
+        cur = e + 1
+        k += 1
+    out.extend(lines[cur:])
+    return "\n".join(out)
+
+def _inline_mistral_table_placeholders(page_obj, text_content: str) -> str:
+    rendered = str(text_content or "")
+    tables = page_obj.get("tables") if isinstance(page_obj, dict) else None
+    if not isinstance(tables, list) or not tables:
+        return rendered
+
+    inlined_count = 0
+    fallback_contents = []
+    for table in tables:
+        if not isinstance(table, dict):
+            continue
+        table_id = _normalize_ocr_text(str(table.get("id") or "")).strip()
+        table_content = _normalize_ocr_text(
+            str(table.get("content") or table.get("html") or table.get("markdown") or "")
+        ).strip()
+        if not table_content:
+            continue
+        if table_id:
+            placeholder = f"[{table_id}]({table_id})"
+            if placeholder in rendered:
+                rendered = rendered.replace(placeholder, table_content)
+                inlined_count += 1
+                continue
+        fallback_contents.append(table_content)
+
+    if inlined_count == 0 and fallback_contents:
+        missing_contents = [
+            content
+            for content in fallback_contents
+            if content not in rendered
+        ]
+        if missing_contents:
+            if rendered.strip():
+                rendered = rendered.rstrip() + "\n\n" + "\n\n".join(missing_contents)
+            else:
+                rendered = "\n\n".join(missing_contents)
+
+    return rendered
+
+
+def _mistral_table_cell_text(cell) -> str:
+    text = _normalize_ocr_text(cell.get_text(" ", strip=True))
+    text = re.sub(r"\s+", " ", text).strip()
+    text = text.replace("|", r"\|")
+    return text
+
+
+def _html_table_fragment_to_mmd(table_html: str) -> str:
+    try:
+        soup = BeautifulSoup(table_html, "html.parser")
+    except Exception:
+        return table_html
+
+    table = 
soup.find("table")
+    if table is None:
+        return table_html
+
+    rows = []
+    pending_rowspans = {}
+    for tr in table.find_all("tr"):
+        row = []
+        col = 0
+        for cell in tr.find_all(["th", "td"], recursive=False):
+            while pending_rowspans.get(col, 0) > 0:
+                row.append("")
+                pending_rowspans[col] -= 1
+                col += 1
+
+            try:
+                colspan = max(1, int(cell.get("colspan", 1)))
+            except (TypeError, ValueError):
+                colspan = 1
+            try:
+                rowspan = max(1, int(cell.get("rowspan", 1)))
+            except (TypeError, ValueError):
+                rowspan = 1
+
+            row.append(_mistral_table_cell_text(cell))
+            for _ in range(1, colspan):
+                row.append("")
+
+            if rowspan > 1:
+                for offset in range(colspan):
+                    pending_rowspans[col + offset] = max(pending_rowspans.get(col + offset, 0), rowspan - 1)
+            col += colspan
+
+        while pending_rowspans.get(col, 0) > 0:
+            row.append("")
+            pending_rowspans[col] -= 1
+            col += 1
+
+        if any(cell.strip() for cell in row):
+            rows.append(row)
+
+    if not rows:
+        return table_html
+
+    width = max(len(row) for row in rows)
+    padded_rows = [row + [""] * (width - len(row)) for row in rows]
+
+    def render_row(row) -> str:
+        return "| " + " | ".join(cell or " " for cell in row) + " |"
+
+    separator = "| " + " | ".join("---" for _ in range(width)) + " |"
+    return "\n".join([render_row(padded_rows[0]), separator, *[render_row(row) for row in padded_rows[1:]]])
+
+
+def _convert_mistral_html_tables_to_mmd(text: str) -> str:
+    if not text or "<table" not in text.lower():
+        return text
+
+    def replace_table(match) -> str:
+        rendered = _html_table_fragment_to_mmd(match.group(0))
+        return f"\n\n{rendered}\n\n"
+
+    converted = _MISTRAL_HTML_TABLE_FRAGMENT_RE.sub(replace_table, text)
+    return re.sub(r"\n{3,}", "\n\n", converted).strip()
+
+
+def _pick_text(page_obj) -> str:
+    text_content = _normalize_ocr_text((page_obj.get("markdown") or page_obj.get("text") or "").strip())
+    inlined_text = _inline_mistral_table_placeholders(page_obj, text_content)
+    return _normalize_ocr_text(_convert_mistral_html_tables_to_mmd(inlined_text).strip())
+
+def _full_page_data_uri(doc: fitz.Document, page_index0: int, zoom: float = Config.IMAGE_RENDER_ZOOM) -> str:
+    page = doc.load_page(page_index0)
+    pix = page.get_pixmap(matrix=fitz.Matrix(zoom, zoom), alpha=False)
+    return f"data:image/png;base64,{base64.b64encode(pix.tobytes('png')).decode('utf-8')}"
+
+def get_signed_url_with_retry(client, file_id, max_retries=Config.API_MAX_RETRIES, initial_delay=Config.API_INITIAL_DELAY_SECONDS):
+    """
+    Attempts to get a signed URL, retrying with exponential backoff if a 404 error occurs.
+    """
+    delay = initial_delay
+    for attempt in range(max_retries):
+        try:
+            signed_url = client.files.get_signed_url(file_id=file_id).url
+            return signed_url
+        except SDKError as e:
+            if e.status_code == 404 and attempt < max_retries - 1:
+                print(f"File ID {file_id} not found yet. Retrying in {delay:.2f}s... (Attempt {attempt + 1}/{max_retries})")
+                time.sleep(delay)
+                delay *= 2
+                delay += random.uniform(0, 0.1)
+            else:
+                raise e
+    raise Exception(f"Failed to get signed URL for file {file_id} after {max_retries} attempts.")
+
+def _process_pdf_bytes_with_fallback(
+    pdf_bytes: bytes,
+    file_name: str,
+    *,
+    batch_size: int,
+    mistral_api_key: Optional[str],
+    per_table_sleep_s: float,
+    start_time: float,
+    time_limit_s: int,
+):
+    """
+    Main PDF processing workflow that takes bytes and returns processed content
+    and a boolean indicating if a timeout occurred, plus the number of PDF pages
+    successfully parsed through OCR. 
+ """ + _log_current_filing_ocr("pdf_or_rendered_html") + _print_mistral_monthly_usage("before", file_name, explicit_api_key=mistral_api_key) + doc = fitz.open(stream=pdf_bytes, filetype="pdf") + n_total = doc.page_count + + results = [] + parsed_page_count = 0 + timed_out = False + + print(f"[init] processing '{file_name}' ({n_total} pages) in batches of {batch_size}…") + p = 1 + while p <= n_total: + if time.time() - start_time > time_limit_s: + print(f"\n[timeout] Time limit of {time_limit_s // 60} minutes reached. Stopping processing for this document.") + timed_out = True + break + + q = min(p + batch_size - 1, n_total) + print(f"[basic] pages {p}–{q} …") + + pages = [] + try: + chunk_bytes = _slice_pdf_bytes(pdf_bytes, first_page=p, last_page=q) + + def _run_batch_ocr(*, client: Mistral, api_key: str, key_spec: Dict[str, Any]): + up = client.files.upload(file={"file_name": f"chunk_{p}-{q}_{file_name}", "content": chunk_bytes}, purpose="ocr") + + if not up or not up.id: + raise Exception("File upload failed to return a valid ID.") + + signed_url = get_signed_url_with_retry(client, file_id=up.id) + headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"} + payload = _build_mistral_ocr_payload(signed_url) + response = _post_mistral_ocr_with_retry( + headers=headers, + payload=payload, + operation_label=f"pdf batch {p}-{q} for {file_name}", + timeout_s=600, + ) + ocr_data = response.json() + usage = _summarize_ocr_usage(ocr_data, response.headers) + return ocr_data.get("pages", []), usage, key_spec["env_name"] + + pages, usage, used_env_name = _run_with_mistral_key_rotation( + f"pdf batch {p}-{q} for {file_name}", + _run_batch_ocr, + explicit_api_key=mistral_api_key, + ) + _record_mistral_key_success(used_env_name, usage=usage, explicit_api_key=mistral_api_key) + + except Exception as e: + print(f"API processing for pages {p}-{q} failed and was skipped. Error: {e}") + error_message = f"Could not repair '{file_name}'. The file may be severely corrupted. Error: {e}" + logging.error( + f"FILE: {file_name}\nERROR: {error_message}\nTRACEBACK:\n{traceback.format_exc()}" + ) + + if not pages: + p = q + 1 + continue + parsed_page_count += len(pages) + + for i, page_obj in enumerate(pages): + page_no = p + i + doc_page = doc.load_page(page_no - 1) + if is_page_nearly_blank(doc_page): + print(f"[page {page_no}] is nearly blank -> skipping.") + continue + + text_basic = _pick_text(page_obj) + results.append({"page": page_no, "content": text_basic, "source": "mistral-ocr"}) + + p = q + 1 + + _print_mistral_monthly_usage("after", file_name, explicit_api_key=mistral_api_key) + return results, timed_out, parsed_page_count + +def _try_repair_pdf_bytes(pdf_bytes: bytes, file_name: str) -> bytes: + """ + Attempts to repair a potentially truncated or corrupted PDF byte stream. + """ + if not pdf_bytes.startswith(b"%PDF-"): + print(f"[warning] Data for '{file_name}' does not appear to be a PDF. Skipping repair.") + return pdf_bytes + + if b"%%EOF" not in pdf_bytes[-1024:]: + print(f"[info] PDF '{file_name}' appears truncated. 
Appending EOF marker for recovery.") + pdf_bytes += b"\n%%EOF\n" + + try: + with fitz.open(stream=pdf_bytes, filetype="pdf") as doc: + if doc.needs_pass: + print(f"[warning] PDF '{file_name}' is password protected and cannot be repaired or processed.") + return None + + with io.BytesIO() as output_buffer: + doc.save(output_buffer, garbage=4, clean=True, deflate=True) + print(f"[success] Successfully repaired and rebuilt '{file_name}'.") + return output_buffer.getvalue() + + except Exception as e: + print(f"[error] Could not repair '{file_name}'. The file may be severely corrupted. Error: {e}") + logging.error( + f"FILE: {file_name}\n" + f"ERROR: Could not process PDF attachment: {e}\n" + f"TRACEBACK:\n{traceback.format_exc()}" + ) + return None + + return pdf_bytes + +def parse_pdf_attachments(pdf_blobs) -> tuple[str, int]: + """ + Parses uu-encoded PDF attachments using a high-quality, in-memory workflow. + This version includes an attempt to repair corrupted PDFs. + """ + _load_sec_parser_env() + if not _has_mistral_api_keys(): + print(f"{_mistral_no_keys_message()} Skipping PDF processing.") + return "", 0 + + TIME_LIMIT_SECONDS = Config.PDF_TIMEOUT_LIMIT * 60 + start_time = time.time() + timed_out = False + total_parsed_page_count = 0 + + md_parts = ["\n### Attached PDF Documents\n"] + + for i, pdf_data in enumerate(pdf_blobs, 1): + if timed_out: + break + + filename = "unknown.pdf" + try: + pdf_bytes, filename = _extract_uu_block(pdf_data) + + repaired_pdf_bytes = _try_repair_pdf_bytes(pdf_bytes, filename) + + if not repaired_pdf_bytes: + md_parts.append(f"**Attachment {i}:** `{filename}` – Corrupted and could not be repaired.") + continue + + if not repaired_pdf_bytes.startswith(b"%PDF-"): + md_parts.append(f"**Attachment {i}:** `{filename}` – not a PDF.") + continue + + md_parts.append(f"**Attachment {i}:** `{filename}`") + + page_results, timed_out_during_processing, parsed_page_count = _process_pdf_bytes_with_fallback( + pdf_bytes=repaired_pdf_bytes, + file_name=filename, + batch_size=Config.PDF_BATCH_SIZE, + mistral_api_key=None, + per_table_sleep_s=Config.PER_TABLE_SLEEP_SECONDS, + start_time=start_time, + time_limit_s=TIME_LIMIT_SECONDS + ) + total_parsed_page_count += int(parsed_page_count or 0) + + if timed_out_during_processing: + timed_out = True + + attachment_content_parts = [res.get('content', '') for res in page_results if res.get('content')] + + if attachment_content_parts: + md_parts.append("\n\n".join(attachment_content_parts)) + else: + if not timed_out: + md_parts.append("_No text found in this document._") + + except Exception as e: + md_parts.append(f"Could not process `{filename}`: {e}") + logging.error( + f"FILE: {filename}\n" + f"ERROR: Could not process PDF attachment: {e}\n" + f"TRACEBACK:\n{traceback.format_exc()}" + ) + traceback.print_exc() + + if timed_out: + md_parts.append("\n\n**Time limit hit – remaining pages or documents were skipped.**") + + return "\n\n".join(md_parts), total_parsed_page_count + +_CP1252_CTRL_RE = re.compile(r'[\x80-\x9F]') + + +def _has_cp1252_ctrls(s: str) -> bool: + return bool(_CP1252_CTRL_RE.search(s)) + +def normalize_text_markup(markup): + if isinstance(markup, bytes): + utf8_text = None + try: + utf8_text = markup.decode('utf-8') + except UnicodeDecodeError: + utf8_text = None + + if utf8_text is not None and not _has_cp1252_ctrls(utf8_text): + text = utf8_text + else: + markup = UnicodeDammit.detwingle(markup) + + utf8_text = None + try: + utf8_text = markup.decode('utf-8') + except UnicodeDecodeError: + utf8_text = None 
+ + ud = UnicodeDammit(markup, is_html=True, smart_quotes_to='unicode') + text = ud.unicode_markup + detected_encoding = (ud.original_encoding or '').lower() + + if utf8_text is not None and detected_encoding.startswith('mac_'): + text = utf8_text + elif (not text) or ('\uFFFD' in text) or _has_cp1252_ctrls(text): + text = markup.decode('latin-1', errors='strict') + else: + text = str(markup) + + text = text.translate(CP1252_CTRL_TO_UNICODE) + + mojibake_map = { + 'â�”': '—', + 'â�“': '–', + 'â�™': "'", + 'â�œ': '"', + 'â�d': '"', + 'â�‰': ' ', + } + for bad, good in mojibake_map.items(): + text = text.replace(bad, good) + + text = text.replace("<", "<").replace(">", ">").replace("    ", "##INDENT##") + + text = html.unescape(text) + + text = (text + .replace('\u00AD', '') + .replace('\u00A0', ' ') + .replace('\u2007', ' ') + .replace('\u202F', ' ') + .replace('\u2009', ' ') + .replace('\u2014', '—') + .replace('\u2013', '–')) + + text = text.translate(PUNCT_CANON) + + return unicodedata.normalize('NFC', text) + +ORDER_I = [ + "1. Title of Security##ROWSPAN_1##
<br>1. Title of Security##ROWSPAN_1##",
+    "2. Transaction Date##ROWSPAN_2##<br>2. Transaction Date##ROWSPAN_2##",
+    "2A. Deemed Execution Date##ROWSPAN_3##<br>2A. Deemed Execution Date##ROWSPAN_3##",
+    "3. Transaction Code (V)##COLSPAN_1##<br>Code",
+    "3. Transaction Code (V)##COLSPAN_1##<br>V",
+    "4. Securities Acquired (A) or Disposed of (D)##COLSPAN_2##<br>Amount",
+    "4. Securities Acquired (A) or Disposed of (D)##COLSPAN_2##<br>(A) or (D)",
+    "4. Securities Acquired (A) or Disposed of (D)##COLSPAN_2##<br>Price",
+    "5. Amount of Securities Beneficially Owned##ROWSPAN_4##<br>5. Amount of Securities Beneficially Owned##ROWSPAN_4##",
+    "6. Ownership Form##ROWSPAN_5##<br>6. Ownership Form##ROWSPAN_5##",
+    "7. Nature of Indirect Beneficial Ownership##ROWSPAN_6##<br>7. Nature of Indirect Beneficial Ownership##ROWSPAN_6##",
+]
+
+ORDER_II = [
+    "1. Title of Derivative Security##ROWSPAN_7##<br>1. Title of Derivative Security##ROWSPAN_7##",
+    "2. Conversion or Exercise Price##ROWSPAN_8##<br>2. Conversion or Exercise Price##ROWSPAN_8##",
+    "3. Transaction Date##ROWSPAN_9##<br>3. Transaction Date##ROWSPAN_9##",
+    "3A. Deemed Execution Date##ROWSPAN_10##<br>3A. Deemed Execution Date##ROWSPAN_10##",
+    "4. Transaction Code (V)##COLSPAN_3##<br>Code",
+    "4. Transaction Code (V)##COLSPAN_3##<br>V",
+    "5. Number of Derivative Securities Acquired (A) or Disposed of (D)##COLSPAN_4##<br>(A)",
+    "5. Number of Derivative Securities Acquired (A) or Disposed of (D)##COLSPAN_4##<br>(D)",
+    "6. Date Exercisable and Expiration Date##COLSPAN_5##<br>Date Exercisable",
+    "6. Date Exercisable and Expiration Date##COLSPAN_5##<br>Expiration Date",
+    "7. Title and Amount of Underlying Securities##COLSPAN_6##<br>Title",
+    "7. Title and Amount of Underlying Securities##COLSPAN_6##<br>Amount or Number of Shares",
+    "8. Price of Derivative Security##ROWSPAN_11##<br>8. Price of Derivative Security##ROWSPAN_11##",
+    "9. Number of Derivative Securities Beneficially Owned##ROWSPAN_12##<br>9. Number of Derivative Securities Beneficially Owned##ROWSPAN_12##",
+    "10. Ownership Form##ROWSPAN_13##<br>10. Ownership Form##ROWSPAN_13##",
+    "11. Nature of Indirect Beneficial Ownership##ROWSPAN_14##<br>11. Nature of Indirect Beneficial Ownership##ROWSPAN_14##",
+]
+
+ORDER_I_FORM3 = [
+    "1. Title of Security",
+    "2. Amount of Securities Beneficially Owned",
+    "3. Ownership Form",
+    "4. Nature of Indirect Beneficial Ownership",
+]
+
+ORDER_II_FORM3 = [
+    "1. Title of Derivative Security##ROWSPAN_1##<br>1. Title of Derivative Security##ROWSPAN_1##",
+    "2. Date Exercisable and Expiration Date (Month/Day/Year)##COLSPAN_1##<br>Date Exercisable",
+    "2. Date Exercisable and Expiration Date (Month/Day/Year)##COLSPAN_1##<br>Expiration Date",
+    "3. Title and Amount of Underlying Securities##COLSPAN_2##<br>Title",
+    "3. Title and Amount of Underlying Securities##COLSPAN_2##<br>Amount or Number of Shares",
+    "4. Conversion or Exercise Price##ROWSPAN_2##<br>4. Conversion or Exercise Price##ROWSPAN_2##",
+    "5. Ownership Form##ROWSPAN_3##<br>5. Ownership Form##ROWSPAN_3##",
+    "6. Nature of Indirect Beneficial Ownership##ROWSPAN_4##<br>6. Nature of Indirect Beneficial Ownership##ROWSPAN_4##",
+]
+
+SEC_COUNTRY_CODES = {
+    'B9': 'ANTIGUA AND BARBUDA',
+    'E9': 'CAYMAN ISLANDS',
+    'F4': 'CHINA',
+    'K3': 'HONG KONG',
+    'AL': 'ALABAMA', 'AK': 'ALASKA', 'AZ': 'ARIZONA', 'AR': 'ARKANSAS', 'CA': 'CALIFORNIA',
+    'CO': 'COLORADO', 'CT': 'CONNECTICUT', 'DE': 'DELAWARE', 'DC': 'DISTRICT OF COLUMBIA',
+    'FL': 'FLORIDA', 'GA': 'GEORGIA', 'HI': 'HAWAII', 'ID': 'IDAHO', 'IL': 'ILLINOIS',
+    'IN': 'INDIANA', 'IA': 'IOWA', 'KS': 'KANSAS', 'KY': 'KENTUCKY', 'LA': 'LOUISIANA',
+    'ME': 'MAINE', 'MD': 'MARYLAND', 'MA': 'MASSACHUSETTS', 'MI': 'MICHIGAN',
+    'MN': 'MINNESOTA', 'MS': 'MISSISSIPPI', 'MO': 'MISSOURI', 'MT': 'MONTANA',
+    'NE': 'NEBRASKA', 'NV': 'NEVADA', 'NH': 'NEW HAMPSHIRE', 'NJ': 'NEW JERSEY',
+    'NM': 'NEW MEXICO', 'NY': 'NEW YORK', 'NC': 'NORTH CAROLINA', 'ND': 'NORTH DAKOTA',
+    'OH': 'OHIO', 'OK': 'OKLAHOMA', 'OR': 'OREGON', 'PA': 'PENNSYLVANIA',
+    'RI': 'RHODE ISLAND', 'SC': 'SOUTH CAROLINA', 'SD': 'SOUTH DAKOTA',
+    'TN': 'TENNESSEE', 'TX': 'TEXAS', 'UT': 'UTAH', 'VT': 'VERMONT', 'VA': 'VIRGINIA',
+    'WA': 'WASHINGTON', 'WV': 'WEST VIRGINIA', 'WI': 'WISCONSIN', 'WY': 'WYOMING',
+    'A0': 'ALBERTA', 'A1': 'BRITISH COLUMBIA', 'A2': 'MANITOBA', 'A3': 'NEW BRUNSWICK',
+    'A4': 'NEWFOUNDLAND', 'A5': 'NOVA SCOTIA', 'A6': 'ONTARIO', 'A7': 'PRINCE EDWARD ISLAND',
+    'A8': 'QUEBEC', 'A9': 'SASKATCHEWAN', 'B0': 'YUKON TERRITORY',
+    'D4': 'GERMANY', 'G6': 'NETHERLANDS', 'H2': 'SWITZERLAND', 'L8': 'UNITED KINGDOM',
+    'Z4': 'ISRAEL'
+}
+
+ITEM_HEADING = re.compile(
+    r'^\s*Item[\s\u00A0]+\d+[A-Za-z]?\.[^.]*\s*$', re.I
+)
+SUBITEM_HEAD = re.compile(r'^\s*\([A-Za-z0-9]+\)\s+.+$')
+
+_sup_re = re.compile(r'<sup[^>]*>(.*?)</sup>', re.I | re.S)
+
+PART_HEADING = re.compile(r'^\s*PART\s+[IVXLC]+\b.*\s*$', re.I)
+
+DOT_ROW = re.compile(
+    r"""^\s*
+    (?P<label>.+?)            # row label text
+    \s*\.{2,}\s*              # dot leader
+    (?P<page>\S+)\s*$         # trailing page reference
+    """,
+    re.X,
+)
+
+def convert_nested_div_table_to_placeholders(soup):
+    """
+    Finds a <div> laid out with display: table nested inside a <td>
, parses it to a Markdown
+    table, replaces all pipes '|' with '' and all newlines '\n' with
+    '##MD_NEWLINE##', and then replaces the original div with this new
+    single-line string.
+    """
+    for div_table in soup.select('td > div[style*="display: table"]'):
+        try:
+            div_html = str(div_table)
+            temp_soup = BeautifulSoup(div_html, 'lxml')
+
+            if (table_tag := temp_soup.find('div', style=re.compile(r'display:\s*table'))):
+                table_tag.name = 'table'
+                for row in temp_soup.find_all('div', style=re.compile(r'display:\s*table-row')):
+                    row.name = 'tr'
+                for cell in temp_soup.find_all('div', style=re.compile(r'display:\s*table-cell')):
+                    cell.name = 'td'
+
+            df_list = pd.read_html(io.StringIO(str(temp_soup)), flavor="lxml", keep_default_na=False, header=0)
+            if not df_list:
+                continue
+
+            df = df_list[0]
+
+            df.dropna(how='all', axis=1, inplace=True)
+            df.dropna(how='all', axis=0, inplace=True)
+            df = df.reset_index(drop=True)
+
+            if df.empty:
+                continue
+
+            md_table_string = to_compact_markdown(df, index=False)
+
+            placeholder_string = md_table_string.replace('\n', '##MD_NEWLINE##').replace('|', '')
+
+            div_table.replace_with(NavigableString(placeholder_string))
+
+        except Exception as e:
+            print(f"[Warning] Could not process nested div-table. Removing it. Error: {e}")
+            div_table.decompose()
+
+def parse_html_filing(html_content: str, form_type: str = "", file_path: Optional[pathlib.Path] = None) -> str:
+    """
+    Parses HTML content into Markdown using an intelligent text assembly method.
+    """
+    global LAST_POSITIONED_HTML_OCR_PAGE_COUNT
+    LAST_POSITIONED_HTML_OCR_PAGE_COUNT = 0
+    start_time = time.time()
+    time_limit_s = Config.HTML_TIMEOUT_LIMIT * 60
+    timed_out = False
+
+    count = 0
+
+    r_tag_pattern = re.compile(
+        r'(?:<|\\>)\s*/?\s*\bR\b\s*(?:>|\\<|<)',
+        re.IGNORECASE
+    )
+    html_content = r_tag_pattern.sub('', html_content)
+    html_content = re.sub(r'', '', html_content, flags=re.IGNORECASE)
+
+    indent_preservation_pattern = re.compile(
+        r'((?:&nbsp;|\s)+)(<(?:b|strong|i|em|u)\b[^>]*>)',
+        re.IGNORECASE
+    )
+    html_content = indent_preservation_pattern.sub(r'\2\1', html_content)
+
+    html_content = re.sub(r'<sup[^>]*>', '##SUP##', html_content, flags=re.IGNORECASE)
+    html_content = re.sub(r'</sup>', '##/SUP##', html_content, flags=re.IGNORECASE)
+    html_content = re.sub(r'<sub[^>]*>', '##SUB##', html_content, flags=re.IGNORECASE)
+    html_content = re.sub(r'</sub>', '##/SUB##', html_content, flags=re.IGNORECASE)
+    html_content = html_content.replace("|", r"\|").replace("<br>", "##NEWLINE##").replace("<br/>", "##NEWLINE##")
+
+    html_content = re.sub(r'<!--.*?-->', '', html_content, flags=re.DOTALL)
+
+    html_content = fix_malformed_inline_paragraphs(html_content)
+
+    u_tag_whitespace_pattern = re.compile(
+        r'(<u[^>]*>)(\s*&nbsp;\s*)(</u>)',
+        re.IGNORECASE
+    )
+
+    html_content = u_tag_whitespace_pattern.sub(r'\1##SPACE##\3', html_content)
+
+    i_tag_whitespace_pattern = re.compile(
+        r'(<i[^>]*>)(\s*&nbsp;\s*)(</i>)',
+        re.IGNORECASE
+    )
+
+    html_content = i_tag_whitespace_pattern.sub(r'\1##I_SPACE##\3', html_content)
+
+    try:
+        soup = BeautifulSoup(html_content, "lxml")
+    except ValueError as e:
+        if "not enough values to unpack" in str(e):
+            print("[Warning] lxml parser crashed on malformed attributes. Falling back to html.parser.")
+            soup = BeautifulSoup(html_content, "html.parser")
+        else:
+            raise
+
+    convert_nested_div_table_to_placeholders(soup)
+
+    _flatten_redundant_nesting(soup)
+
+    convert_margin_layout_to_table(soup)
+
+    fix_inverted_bold_paragraphs(soup)
+
+    if is_document_layout_positioned(soup):
+        file_name = file_path.name if file_path else "unknown_file.html"
+        print(f"--> Detected positioned layout for '{file_name}'. Routing to OCR-based parser.")
+        return parse_html_via_pdf_render(html_content, file_name)
+
+    final_markdown_from_ocr, is_fully_processed = parse_positioned_html_islands_via_ocr(soup)
+
+    if is_fully_processed:
+        print("--> Document fully processed by island parser. Bypassing standard HTML parsing.")
+        return final_markdown_from_ocr, True
+
+    convert_wingdings_boxes(soup)
+
+    convert_vertical_align_superscripts(soup)
+
+    convert_styled_inline_divs_to_spans(soup)
+
+    soup = pre_fix_document_structure(soup)
+
+    unwrap_fragmenting_tags(soup)
+
+    for block_tag in soup.find_all(['div', 'p']):
+
+        first_text_node = block_tag.find(string=True)
+
+        if first_text_node and block_tag.get_text(strip=True).startswith(first_text_node.strip()):
+            text = str(first_text_node)
+            leading_ws_match = re.match(r'^([\s\u00A0\u2003]+)', text)
+
+            if leading_ws_match:
+                ws_string = leading_ws_match.group(1)
+                indent_level = 0
+                for char in ws_string:
+                    if char in ['\u00A0', ' ']:
+                        indent_level += 0.5
+                    elif char == '\u2003':
+                        indent_level += 2
+
+                final_indent_level = int(indent_level)
+                if final_indent_level > 0:
+                    indent_prefix = '##INDENT##' * final_indent_level
+
+                    block_tag.insert(0, NavigableString(indent_prefix))
+
+                first_text_node.replace_with(text.lstrip(' \u00A0\u2003'))
+
+    _normalize_list_indentation(soup)
+
+    convert_styled_superscripts_to_placeholders(soup)
+
+    promote_styled_headings(soup)
+
+    _debug_print("→ stage 0 (raw):", len(soup.find_all("table")))
+    timeout_check = check_timeout(start_time, time_limit_s, "HTML pre-processing Stage 0-1")
+    if timeout_check is not None:
+        return timeout_check, True
+
+    for text_node in soup.find_all(string=True):
+        s = str(text_node)
+        s = s.replace('\u00A0', ' ')
+        for z in ['\u200B', '\u200C', '\u200D', '\u2060', '\u2063', '\uFEFF']:
+            s = s.replace(z, '')
+
+        text_node.replace_with(s)
+
+    title_tag = soup.find('title')
+    title_text = title_tag.text if title_tag else ''
+    is_form4 = 'Form 4' in title_text or 'form 4' in html_content[:1000].lower()
+    is_legacy = bool(soup.find(string=re.compile(r'statement of changes in beneficial ownership', re.I)))
+    is_modern_xml = bool(soup.find('ownershipDocument'))
+
+    def _style_declares_bold(style: str) -> bool:
+        if not style:
+            return False
+
+        style_lc = style.lower()
+        style_compact = style_lc.replace(' ', '')
+
+        if any(token in style_compact for token in (
+            'font-weight:bold',
+            'font-weight:700',
'font-weight:800', + 'font-weight:900', + )): + return True + + font_decl_match = re.search(r'font\s*:\s*([^;]+)', style_lc) + if font_decl_match and re.search(r'(^|[\s/])(?:bold|700|800|900)(?=$|[\s/])', font_decl_match.group(1)): + return True + + return False + + for tr in soup.find_all('tr', style=True): + row_style = tr.get('style', '') + row_style_lc = row_style.lower().replace(' ', '') + inherited_bits = [] + + if _style_declares_bold(row_style): + inherited_bits.append('font-weight:bold') + if 'font-style:italic' in row_style_lc: + inherited_bits.append('font-style:italic') + if 'text-decoration:underline' in row_style_lc: + inherited_bits.append('text-decoration:underline') + + if not inherited_bits: + continue + + for cell in tr.find_all(['td', 'th'], recursive=False): + cell_style = cell.get('style', '') + cell_style_lc = cell_style.lower().replace(' ', '') + additions = [] + + for bit in inherited_bits: + if bit.startswith('font-weight:'): + if 'font-weight:' not in cell_style_lc: + additions.append(bit) + elif bit.startswith('font-style:'): + if 'font-style:' not in cell_style_lc: + additions.append(bit) + elif bit.startswith('text-decoration:'): + if 'text-decoration:' not in cell_style_lc: + additions.append(bit) + + if additions: + merged_style = cell_style.rstrip().rstrip(';') + if merged_style: + merged_style += '; ' + merged_style += '; '.join(additions) + cell['style'] = merged_style + + styled_tags = soup.find_all(['span', 'font', 'p', 'div', 'td', 'th'], style=True) + + for tag in styled_tags: + raw_style_str = tag.get('style', '') + style_str = raw_style_str.lower().replace(' ', '') + + is_bold = _style_declares_bold(raw_style_str) + is_italic = 'font-style:italic' in style_str + is_underline = 'text-decoration:underline' in style_str + + if not (is_bold or is_italic or is_underline): + continue + + if is_bold and tag.find_parent(['b', 'strong']): + is_bold = False + + if not (is_bold or is_italic or is_underline): + if tag.name in ['span', 'font']: + tag.unwrap() + continue + + inner_content_holder = soup.new_tag('div') + for child in list(tag.contents): + inner_content_holder.append(child.extract()) + + if is_underline: + new_u_tag = soup.new_tag('u') + new_u_tag.extend(inner_content_holder.contents) + inner_content_holder.clear() + inner_content_holder.append(new_u_tag) + + if is_italic: + new_i_tag = soup.new_tag('i') + new_i_tag.extend(inner_content_holder.contents) + inner_content_holder.clear() + inner_content_holder.append(new_i_tag) + + if is_bold: + new_b_tag = soup.new_tag('b') + new_b_tag.extend(inner_content_holder.contents) + inner_content_holder.clear() + inner_content_holder.append(new_b_tag) + + tag.clear() + tag.extend(inner_content_holder.contents) + + if tag.name in ['span', 'font']: + tag.unwrap() + + merge_whitespace_tags(soup) + + remove_empty_bold_tags(soup) + + + defragment_adjacent_tags(soup, ['b', 'strong']) + defragment_adjacent_tags(soup, ['i', 'em']) + defragment_adjacent_tags(soup, ['font']) + + process_inline_tags(soup, ['b', 'strong'], "BOLD") + process_inline_tags(soup, ['i', 'em'], "ITALIC") + process_inline_tags(soup, ['u'], "U") + process_anchor_tags(soup) + + colspan_rowspan_tag(soup) + + _debug_print("→ stage 1 (after bold cleanup):", len(soup.find_all("table"))) + timeout_check = check_timeout(start_time, time_limit_s, "HTML pre-processing Stage 1-2") + if timeout_check is not None: + return timeout_check, True + + xml_tags = soup.find_all(re.compile(r'^xml$', re.I)) + if xml_tags: + if soup.body is None or not 
soup.body.get_text(strip=True): + return parse_any_xml([t.decode_contents() for t in xml_tags]) + + _debug_print("→ stage 2 (after wingdings):", len(soup.find_all("table"))) + timeout_check = check_timeout(start_time, time_limit_s, "HTML pre-processing Stage 2-3") + if timeout_check is not None: + return timeout_check, True + + normalize_dl_lists(soup) + protect_special_chars_in_tables(soup) + + _debug_print("→ stage 3 (after list-table→li):", len(soup.find_all("table"))) + timeout_check = check_timeout(start_time, time_limit_s, "HTML pre-processing Stage 3-4") + if timeout_check is not None: + return timeout_check, True + + for tag in soup.find_all(["xml", "script", "style", "ix:header", "ix:resources"]): + tag.decompose() + + _debug_print("→ stage 4 (after xml/script/style/ix):", len(soup.find_all("table"))) + timeout_check = check_timeout(start_time, time_limit_s, "HTML pre-processing Stage 4-5") + if timeout_check is not None: + return timeout_check, True + + for tag in soup.find_all(attrs={"style": re.compile(r'display:\s*none', re.I)}): + tag.decompose() + + _debug_print("→ stage 5 (after display:none):", len(soup.find_all("table"))) + timeout_check = check_timeout(start_time, time_limit_s, "HTML pre-processing Stage (final)") + if timeout_check is not None: + return timeout_check, True + + total_tables = len(soup.find_all("table")) + next_milestone_pct = 10 + + for img in soup.find_all("img"): + src = img.get("src", "").strip() + if not src: + img.decompose() + continue + alt = img.get("alt", "").strip() + img.replace_with(NavigableString(f"![{alt}]({src})")) + + sections_md, text_buf = [], [] + BLOCK_TAGS = {"p", "div"} + HEADING_TAGS = {"h1", "h2", "h3", "h4", "h5", "h6"} + pending: List[str] = [] + last_emitted = None + + def _emit_pending(): + nonlocal last_emitted + if pending: + unique_pending = [p for p in pending if p.lstrip().rstrip() != last_emitted] + sections_md.extend(unique_pending) + if unique_pending: last_emitted = unique_pending[-1].lstrip().rstrip() + pending.clear() + + def flush(prefix: str = ""): + + raw_text = "".join(text_buf) + + txt = re.sub(r'\s+', ' ', raw_text).strip() + + if txt: + _emit_pending() + + final_text = re.sub(r'##SUP##(.*?)##/SUP##', r'\1', txt) + + if final_text.startswith(('http://', 'https://')): + sections_md.append(prefix + final_text + "\n\n") + else: + sections_md.append(prefix + final_text + "\n\n") + + text_buf.clear() + + def queue(level: int, cand: str, el): + tag = f"\n{'#' * level} {cand}\n" + if not pending or pending[-1] != tag: + pending.append(tag) + el.clear() + + body_tag = soup.body + body = body_tag + + if body_tag and body_tag.find(True): + body = body_tag + else: + body = soup + + is_13f_filing = "13F" in form_type.upper() + + for elem in body.descendants: + + if time.time() - start_time > time_limit_s: + print(f"[timeout] HTML parsing exceeded {time_limit_s // 60} minutes. 
Stopping.") + timed_out = True + break + + if elem.name == 'p' and elem.has_attr('style'): + style = elem.get('style', '').lower() + if 'border-bottom' in style and not elem.get_text(strip=True): + flush() + sections_md.append("\n\n------\n\n") + elem.clear() + continue + + if elem.name == 'div': + style = elem.get('style', '').lower() + if 'page-break-after: always' in style and not elem.get_text(strip=True): + flush() + sections_md.append("\n\n------\n\n") + elem.clear() + continue + + if elem.name in HEADING_TAGS: + flush() + raw_cand = elem.get_text(separator=' ', strip=True) + + cand = re.sub(r'\s+', ' ', raw_cand).strip() + + if cand and cand != last_emitted: + lvl = int(elem.name[1]) + tag = f"\n{'#' * lvl} {cand}\n" + sections_md.append(tag) + last_emitted = cand + elem.clear() + continue + + if elem.name == "li": + flush() + li_text = elem.get_text() + + if li_text: + if li_text.lstrip().startswith(tuple(BULLET_CHARS)): + sections_md.append("\n" + li_text) + else: + sections_md.append("* " + li_text) + + sections_md.append("\n\n") + + elem.clear() + continue + + if elem.name in BLOCK_TAGS or elem.name == "br": + flush() + continue + + if elem.name == "hr": + flush() + sections_md.append("\n\n------\n\n") + elem.clear() + continue + + if elem.name == "pre": + flush() + pre_text = elem.get_text() + if pre_text: + sections_md.append(pre_text) + elem.clear() + continue + + if elem.name == "table": + count += 1 + current_pct = (count / total_tables) * 100 + + if current_pct >= next_milestone_pct and total_tables > 10: + milestone_to_print = int(next_milestone_pct) + print(f"-> Processing tables... {milestone_to_print}% complete ({count} of {total_tables})") + + next_milestone_pct += 10.0 + flush() + + if handle_width_indented_list_table(elem, sections_md): + elem.clear() + continue + + if handle_list_like_table_with_indentation(elem, sections_md): + elem.clear() + continue + + if handle_sentence_fragment_table(elem, sections_md): + elem.clear() + continue + + tag_border_cells(elem, soup) + + DEFAULT_FONT_SIZE_PT = 10.0 + STANDARD_INDENT_EM = 1.2 + + for cell in elem.find_all(['td', 'th']): + elements_to_check = [cell] + cell.find_all(['p', 'div', 'font']) + + max_indent_pt = 0.0 + font_size_pt = None + + for el in elements_to_check: + indent_info = _calculate_effective_indent(el) + if indent_info['indent'] > max_indent_pt: + max_indent_pt = indent_info['indent'] + if indent_info['font_size']: + font_size_pt = indent_info['font_size'] + + if font_size_pt is None: + for el in elements_to_check: + indent_info = _calculate_effective_indent(el) + if indent_info['font_size']: + font_size_pt = indent_info['font_size'] + break + + effective_font_size = font_size_pt or DEFAULT_FONT_SIZE_PT + + if max_indent_pt > 0 and effective_font_size > 0: + indent_em = max_indent_pt / effective_font_size + ratio = indent_em / STANDARD_INDENT_EM + quantized_level = round(ratio * 4) / 4 + full_indents = int(quantized_level) + remainder = quantized_level - full_indents + + indent_prefix = "" + if full_indents > 0: + indent_prefix += '##INDENT##' * full_indents + + if remainder >= 0.75: + indent_prefix += '   ' + elif remainder >= 0.5: + indent_prefix += '  ' + + if indent_prefix: + cell.insert(0, NavigableString(indent_prefix)) + + table_text = elem.get_text(separator=' ', strip=True) + + if ITEM_HEADING.match(table_text): + queue(3, table_text, elem) + continue + + for cell in elem.find_all(['td', 'th']): + indent_level = 0 + text_indent_level = 0 + elements_to_check = [cell] + cell.find_all(['div', 'p'], 
recursive=False) + + for el in elements_to_check: + style = el.get('style', '') + if not style: continue + + pad_match = re.search(r'padding-left\s*:\s*([\d\.]+)(pt|px|em)', style) + margin_match = re.search(r'margin-left\s*:\s*([\d\.]+)(pt|px|em)', style) + + total_offset_pt = 0.0 + + if pad_match: + val, unit = float(pad_match.group(1)), pad_match.group(2) + if unit == 'em': total_offset_pt += val * 10.0 + elif unit == 'px': total_offset_pt += val * 0.75 + else: total_offset_pt += val + + if margin_match: + val, unit = float(margin_match.group(1)), margin_match.group(2) + if unit == 'em': total_offset_pt += val * 10.0 + elif unit == 'px': total_offset_pt += val * 0.75 + else: total_offset_pt += val + + if total_offset_pt > 0: + level = int(round(total_offset_pt / 5.0)) + if level > 0: + indent_level = level + break + + first_visible_text = None + for descendant in cell.descendants: + if not isinstance(descendant, NavigableString): + continue + descendant_text = str(descendant) + if descendant_text.strip(' \t\r\n\u00A0\u2003'): + first_visible_text = descendant_text + break + + if first_visible_text: + leading_ws_match = re.match(r'^([\s\u00A0\u2003]+)', first_visible_text) + if leading_ws_match: + ws_string = leading_ws_match.group(1) + ws_string = re.sub(r'[\r\n]+[ \t\f\v]*', '', ws_string) + indent_units = 0.0 + for char in ws_string: + if char in ['\u00A0', ' ']: + indent_units += 0.5 + elif char == '\u2003': + indent_units += 2.0 + text_indent_level = int(indent_units) + indent_level = max(indent_level, text_indent_level) + + for br in cell.find_all('br'): + br.replace_with('##NEWLINE##') + for p in cell.find_all(['p', 'div']): + p.append('##NEWLINE##') + + cell_text = re.sub(r'(?<=[A-Za-z0-9])-\s+', '- ', cell.get_text(strip=False).replace("**", "").replace("** ", "")) + if text_indent_level > 0: + cell_text = re.sub(r'^[\s\u00A0\u2003]+', '', cell_text) + if cell_text.strip() == "##NEWLINE##": + cell_text = "" + + IND = "\u2063" + if text_indent_level > 0 and not re.match(r'^(?:##INDENT##| )+', cell_text): + cell_text = ('##INDENT##' * text_indent_level) + cell_text + elif indent_level > 0: + cell_text = IND * indent_level + cell_text + + cell.clear() + cell.string = cell_text + + for cell in elem.find_all(['td', 'th']): + if cell.get('colspan'): + try: + if int(cell['colspan']) > 500: + del cell['colspan'] + except (ValueError, TypeError): + del cell['colspan'] + + for tr in elem.find_all('tr'): + if not tr.find(['td', 'th']): + tr.decompose() + + table_html = str(elem) + table_html = protect_numeric_list_items(table_html) + + table_html = _fix_escaped_malformed_font_tag(table_html) + + try: + df_from_html = pd.read_html(io.StringIO(table_html), flavor="lxml", keep_default_na=False, na_values=[""])[0] + + df_from_html.replace(to_replace=r'##PROTECT_(.*?)##', value=r'\1', regex=True, inplace=True) + + df_from_html = df_from_html.replace({'##VISUAL_BORDER##': ''}, regex=False) + + first_real_row_idx = 0 + for i, row in df_from_html.iterrows(): + is_junk = all( + str(cell).strip() in ('', 'nan', '', '', 'NaN') or 'spacer.gif' in str(cell) + for cell in row + ) + if not is_junk: + first_real_row_idx = i + break + + raw_df = df_from_html.iloc[first_real_row_idx:].reset_index(drop=True) + + raw_df = raw_df.replace(r'^\s*(?: )?\s*$', np.nan, regex=True) + + raw_df = (raw_df + .dropna(how='all') + .dropna(how='all', axis=1) + .reset_index(drop=True)) + + if not raw_df.empty: + for r in range(1, len(raw_df)): + for c in range(len(raw_df.columns)): + if isinstance(raw_df.iat[r, c], str) and 
'' in raw_df.iat[r, c]: + above_cell = raw_df.iat[r - 1, c] + if pd.isna(above_cell): + raw_df.iat[r - 1, c] = '' + else: + raw_df.iat[r - 1, c] = str(above_cell) + '' + + raw_df = raw_df.replace({r'': '', r'': ''}, regex=True) + + sup_replacer = lambda x: re.sub(r'##SUP##(.*?)##/SUP##', r'\1', str(x)) if '##SUP##' in str(x) else x + + raw_df = raw_df.applymap(sup_replacer) + + raw_df = drop_tag_only_rows_cols(raw_df).reset_index(drop=True) + + raw_df = ( + raw_df + .replace(r' -(?=[A-Za-z])', ' - ', regex=True) + .replace(r'\s{2,}', ' ', regex=True) + ) + + def normalize_for_comparison(val): + if isinstance(val, str): + text = val.replace('\u2063', '').replace('\u00A0', '') + return re.sub(r'\s+', ' ', text).strip() + + return val + + if not raw_df.empty and raw_df.shape[1] > 1: + cols_to_clean = raw_df.columns[1:] + raw_df[cols_to_clean] = raw_df[cols_to_clean].applymap(normalize_for_comparison) + + table_text = re.sub(r'##(BOLD_START_\d+|BOLD_END_\d+|U_START_\d+|U_END_\d+|ITALIC_START_\d+|ITALIC_END_\d+|ROWSPAN_\d+|COLSPAN_\d+|LINK_START_\d+__[^#]+|LINK_END_\d+)##', '', table_text) + table_text = table_text.replace('##NEWLINE##', '').replace('
<br>', '').strip()
+
+                positives = (
+                    (('$' in table_text or '£' in table_text or '€' in table_text or " ) " in table_text) and re.search(r'\d', table_text)) or
+                    ('%' in table_text or re.search(r'\([\d,]+\)', table_text)) or
+                    (len(table_text) > 300 and re.search(r'\d', table_text) and
+                     "Part I" not in table_text and
+                     "Name of each exchange on which registered" not in table_text and
+                     "ITEM 1" not in table_text) or
+                    any(k in table_text for k in ["Common stock", "Total", "By:", "Earnings", "##SUP", "##SUB", "marketing", "Period Ended", "Months Ended", "For Against"]) or
+                    ((raw_df == ')').any().any())
+                )
+
+                exclusions = any(k in table_text for k in ["Emerging growth company", "Smaller reporting company", "[One-month LIBOR +] __%", "⌧"])
+
+                is_financial_table = positives and not exclusions
+
+                if is_financial_table and not is_13f_filing and not (is_form4 and is_legacy):
+                    df_to_render = clean_financial_df(raw_df)
+                else:
+                    df_to_render = raw_df
+                    if "OO" in table_text and "CHECK" in table_text:
+                        df_to_render = drop_active_colspan_empty_cols(df_to_render)
+
+                md = df_to_markdown(df_to_render, disable_numparse=True, is_legacy_form4_table1=((is_form4 and is_legacy and "Table I" in df_to_render.to_string())), is_legacy_form4_table2=(is_form4 and is_legacy and "Table II" in df_to_render.to_string()))
+
+                if md and not md.isspace():
+                    _emit_pending()
+
+                    if "|:-" in md:
+                        sections_md.append(f"\n---\n\n{md}\n\n---\n")
+                    else:
+                        sections_md.append(f"{md}\n\n")
+
+            except (ValueError, IndexError):
+                fallback_text = elem.get_text(separator=' ', strip=False)
+                if fallback_text:
+                    _emit_pending()
+                    sections_md.append(textwrap.fill(fallback_text) + "\n")
+
+            elem.clear()
+            continue
+
+        if isinstance(elem, NavigableString):
+            if not elem.find_parent(HEADING_TAGS.union({"li", "table", "script", "style"})):
+                text_buf.append(str(elem))
+
+    flush()
+    pending.clear()
+
+    md = "".join(sections_md)
+    md = re.sub(
+        r"EX-[\d\.]+\s+\d+\s+[\w\.]+\.htm\s+EX-[\d\.]+\s+Document\s+"
+        r"created\s+using\s+Wdesk.*?Document",
+        "",
+        md,
+        flags=re.I,
+    )
+
+    md = re.sub(r'\n{3,}', '\n\n', md).strip()
+
+    if timed_out:
+        return md + "\n\n", False
+    else:
+        return md, False
+
+def clean_phone_numbers(text: str) -> str:
+    """
+    Removes newlines from within phone numbers and unifies formatting
+    by removing any Markdown bold tags from the number components.
+    """
+    if not isinstance(text, str):
+        return text
+
+    phone_pattern = re.compile(r"""
+        \*{0,2}
+        (
+            \(\s*\d{3}\s*\)
+        )
+        \*{0,2}
+
+        \s*\n\s*
+
+        \*{0,2}
+        (
+            \d{3}\s*[-]?\s*\d{4}
+        )
+        \*{0,2}
+        """, re.VERBOSE)
+
+    return phone_pattern.sub(r"\1 \2", text)
+
+BULLETS = "○•●·◦➢▪"
+BOLD_SPLIT_RE = re.compile(r'\*\* +')
+
+def _fix_paragraph_bold_runs(txt: str) -> str:
+    out, last = [], 0
+
+    for m in BOLD_SPLIT_RE.finditer(txt):
+        i = m.start()
+
+        window = txt[max(0, i-150):i]
+
+        if window.count("**") >= 2:
+            continue
+        if re.search(rf"[{BULLETS}]\s*\*\*\s*$", window):
+            continue
+
+        out.append(txt[last:i].rstrip())
+        out.append("\n\n**")
+        last = m.end()
+
+    out.append(txt[last:])
+    return "".join(out)
+
+def _convert_bullet_tables_to_lists(markdown_content: str) -> str:
+    """
+    Finds and converts two-column Markdown tables that are used to format
+    bulleted lists into proper list items.
+
+    e.g., | • | Some text... | -> • Some text...
+    """
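+    # Illustrative sketch of the shape this helper targets (hypothetical
+    # input, not taken from a real filing): a table block whose header row is
+    # blank and whose first column holds only bullet glyphs, e.g.
+    #
+    #   |    |                  |
+    #   |----|------------------|
+    #   | •  | First list item  |
+    #   | •  | Second list item |
+    #
+    # is rewritten to:
+    #
+    #   • First list item
+    #   • Second list item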
+    table_pattern = re.compile(r'\n---\n\n(.*?)\n\n---\n', re.S)
+    bullet_chars = {'○', '•', '●', '*', '·', '◦', '➢', '▪'}
+
+    def replacer(match):
+        md_table_str = match.group(1)
+        lines = md_table_str.strip().split('\n')
+
+        if len(lines) < 3:
+            return match.group(0)
+
+        header_cells = [cell.strip() for cell in lines[0].strip('|').split('|')]
+        if any(header_cells):
+            return match.group(0)
+
+        separator_cells = [cell.strip() for cell in lines[1].strip('|').split('|')]
+        if len(separator_cells) != 2:
+            return match.group(0)
+
+        list_items = []
+        is_bullet_table = True
+        for line in lines[2:]:
+            data_cells = [cell.strip() for cell in line.strip('|').split('|')]
+            if len(data_cells) != 2:
+                is_bullet_table = False
+                break
+
+            bullet_part = data_cells[0]
+            text_part = data_cells[1]
+
+            if bullet_part not in bullet_chars:
+                is_bullet_table = False
+                break
+
+            list_items.append(f"{bullet_part} {text_part}")
+
+        if is_bullet_table and list_items:
+            return "\n".join(list_items)
+
+        return match.group(0)
+
+    return table_pattern.sub(replacer, markdown_content)
+
+def _format_footnote_lists(markdown_content: str) -> str:
+    """
+    Finds and formats footnotes that appear either as two-column tables
+    or as simple numbered lines, converting the number into a superscript.
+    """
+    table_pattern = re.compile(r'\n---\n\n(.*?)\n\n---\n', re.S)
+
+    def table_replacer(match):
+        md_table_str = match.group(1)
+        lines = md_table_str.strip().split('\n')
+
+        if len(lines) != 3:
+            return match.group(0)
+        if any(cell.strip() for cell in lines[0].strip('|').split('|')):
+            return match.group(0)
+        if len(lines[1].strip('|').split('|')) != 2:
+            return match.group(0)
+
+        data_cells = [cell.strip() for cell in lines[2].strip('|').split('|')]
+        if len(data_cells) != 2:
+            return match.group(0)
+
+        number_part, text_part = data_cells
+
+        num_match = re.fullmatch(r'(?:<sup>)?\s*(\d{1,2})\s*(?:</sup>)?', number_part)
+
+        if num_match and text_part:
+            num_str = num_match.group(1)
+            return f"<sup>{num_str}</sup> {text_part}"
+
+        return match.group(0)
+
+    content = table_pattern.sub(table_replacer, markdown_content)
+
+    footnote_line_pattern = re.compile(r'^(\d{1,2})\.\s+(?=[A-Z])', re.MULTILINE)
+
+    content = footnote_line_pattern.sub(r'<sup>\1</sup> ', content)
+
+    return content
+
+def _remove_page_numbers(markdown_content: str) -> str:
+    """
+    Removes page numbers from the document by targeting three patterns:
+    1. Standalone integers on a line by themselves (e.g., "1", "A-1").
+    2. Standalone integers surrounded by hyphens (e.g., "-1-").
+    3. The above patterns when they are the sole content of a single-cell Markdown table.
+    The number must be less than 500 to be considered a page number.
+    This version also collapses the extra newlines left by the removal.
+    """
+
+    pattern_standalone = re.compile(
+        r"(\n{2,}|^)"
+        r"(?:)?"
+        r"[ \t]*"
+        r"(?:"
+        r"(?:[A-Z]+-)?(\d{1,3})(?:\.)?"
+        r"|"
+        r"-\s*(\d{1,3})\s*-"
+        r"|"
+        r"page\s+(\d{1,3})(?:\.)?"
+ r")" + r"[ \t]*" + r"(\n{2,}|$)", + re.MULTILINE | re.IGNORECASE + ) + + def replacer_standalone(m): + try: + num_str = m.group(2) or m.group(3) or m.group(4) + page_num = int(num_str) + if page_num < 500: + return "\n\n" + except (ValueError, TypeError, IndexError): + pass + return m.group(0) + + content = pattern_standalone.sub(replacer_standalone, markdown_content) + + pattern_table = re.compile(r'\n---\n\n(.*?)\n\n---\n', re.S) + + def replacer_table(match): + """Callback to check if a table block is just a page number.""" + table_content = match.group(1).strip() + lines = table_content.split('\n') + + if len(lines) == 3: + data_row = lines[2].strip() + cell_match = re.fullmatch( + r'\|\s*(?:(?:[A-Z]+-)?(\d{1,3})(?:\.)?|-\s*(\d{1,3})\s*-)\s*\|', + data_row + ) + if cell_match: + try: + num_str = cell_match.group(1) or cell_match.group(2) + page_num = int(num_str) + if page_num < 500: + return "\n\n" + except (ValueError, TypeError, IndexError): + pass + + return match.group(0) + + content = pattern_table.sub(replacer_table, content) + + return re.sub(r'\n{3,}', '\n\n', content) + +BULLET_BOLD_SPLIT_RE = re.compile( + rf"""(?x) + (^[^\n]{{0,150}}) # 1) a look-back window ≤150 chars, captured + \n+ # 2) the offending newline(s) + (\*\*[A-Za-z]) # 3) "**S" (opening bold + letter) + """, + re.M, +) + +def merge_adjacent_italics(s: str) -> str: + pair = re.compile(r"##ITALIC_END_(\d+)####ITALIC_START_(\d+)##") + while True: + m = pair.search(s) + if not m: + break + a, b = m.group(1), m.group(2) + start_tag = f"##ITALIC_START_{a}##" + idx = s.rfind(start_tag, 0, m.start()) + + before, after = s[:m.start()], s[m.end():] + + if idx != -1: + before = before[:idx] + f"##ITALIC_START_{b}##" + before[idx + len(start_tag):] + + if before and after and not before[-1].isspace() and not after[0].isspace(): + s = before + " " + after + else: + s = before + after + return s + +def merge_bracket_fragmented_underlines(s: str) -> str: + """ + Repairs underline placeholder runs that split bracketed text into + adjacent fragments, e.g.: + ##U_START_a##[##U_END_a####U_START_b##Reserved##U_END_b## + -> ##U_START_b##[Reserved##U_END_b## + + and the symmetric closing-bracket case. + """ + while True: + updated = s + updated = re.sub( + r'##U_START_(\d+)##\[\s*##U_END_\1##\s*##U_START_(\d+)##(.*?)##U_END_\2##', + r'##U_START_\2##[\3##U_END_\2##', + updated, + ) + updated = re.sub( + r'##U_START_(\d+)##(.*?)##U_END_\1##\s*##U_START_(\d+)##\]\s*##U_END_\3##', + r'##U_START_\1##\2]##U_END_\1##', + updated, + ) + if updated == s: + break + s = updated + return s + +def collapse_redundant_bold_placeholders(s: str) -> str: + """ + Collapses duplicated bold placeholder wrappers before they are restored + to literal markdown asterisks. + + This is intentionally done at the placeholder stage so escaped literal + asterisks in the source document are not touched. 
+ """ + while True: + updated = s + + updated = re.sub( + r"##BOLD_START_\d+##" + r"((?:##(?:BOLD|ITALIC|U)_(?:START|END)_\d+##|\s)*)" + r"(##BOLD_START_(\d+)##.*?##BOLD_END_\3##)" + r"((?:##(?:BOLD|ITALIC|U)_(?:START|END)_\d+##|\s)*)" + r"##BOLD_END_\d+##", + r"\1\2\4", + updated, + flags=re.DOTALL, + ) + + updated = re.sub( + r"##BOLD_START_\d+##(?=(?:\s|##NEWLINE##)*##BOLD_START_\d+##)", + "", + updated, + ) + updated = re.sub( + r"(##BOLD_END_\d+##)(?:\s|##NEWLINE##)*##BOLD_END_\d+##", + r"\1", + updated, + ) + + if updated == s: + break + s = updated + return s + +def _post_process_text_cleanup(markdown_text: str, legacy_form4 = False) -> str: + """ + Final-stage clean-up for Markdown pulled from iXBRL/EDGAR filings. + + Returns a tidy Markdown string with: + • mojibake fixed + • hidden metadata lines removed + • common word-splits repaired + • normalised spacing & punctuation + """ + if not markdown_text: + return "" + + pattern = re.compile(r'(##BOLD_START_\d+##)(\s+)##BOLD_START_(\d+)##\((##BOLD_END_\3##)') + + replacement = r'\2\1(' + + markdown_text = pattern.sub(replacement, markdown_text) + + pattern = re.compile( + r'(##BOLD_START_(\d+)##)' + r'##BOLD_START_(\d+)##' + r'(\)?%|\))' + r'##BOLD_END_\3##' + r'##BOLD_END_\2##' + ) + + replacement = r'\1\4##BOLD_END_\2##' + + markdown_text = pattern.sub(replacement, markdown_text) + + markdown_text = markdown_text.replace("", "").replace("##SUP####/SUP##", "").replace("syste m,", "system,") + + pattern = r"(##BOLD_START_\d+##)\n\n" + + replacement = r"\n\n\1" + + markdown_text = re.sub(pattern, replacement, markdown_text) + + pattern = re.compile(r'\s+(##(?:BOLD|ITALIC|U)_END_\d+##)\s+([,.:;!?])') + markdown_text = pattern.sub(r'\1\2', markdown_text) + + markdown_text = re.sub( + r"##BOLD_START_\d+##(?:##(?:BOLD|ITALIC)_(?:START|END)_\d+##)*(##BOLD_START_(\d+)##.*?##BOLD_END_\2##)(?:##(?:BOLD|ITALIC)_(?:START|END)_\d+##)*##BOLD_END_\d+##", + r"\1", markdown_text, flags=re.DOTALL) + + markdown_text = re.sub( + r"##ITALIC_START_\d+##(?:##(?:BOLD|ITALIC)_(?:START|END)_\d+##)*(##ITALIC_START_(\d+)##.*?##ITALIC_END_\2##)(?:##(?:BOLD|ITALIC)_(?:START|END)_\d+##)*##ITALIC_END_\d+##", + r"\1", markdown_text, flags=re.DOTALL) + + markdown_text = re.sub(r"##BOLD_START_\d+##(##BOLD_START_\d+##)", r"\1", markdown_text) + markdown_text = re.sub(r"(##BOLD_END_\d+##)##BOLD_END_\d+##", r"\1", markdown_text) + + markdown_text = re.sub(r"##ITALIC_START_\d+##(##ITALIC_START_\d+##)", r"\1", markdown_text) + markdown_text = re.sub(r"(##ITALIC_END_\d+##)##ITALIC_END_\d+##", r"\1", markdown_text) + + markdown_text = markdown_text.replace(" ", " ").replace(" ", " ").replace(" ", " ") + + markdown_text = re.sub( + r'##NEWLINE##(?=(##BOLD_START_\d+##\)##BOLD_END_\d+##))', + r'', + markdown_text + ) + + markdown_text = merge_adjacent_italics(markdown_text) + + markdown_text = re.sub(r"##ITALIC_START_\d+##([○•●·◦➢])##ITALIC_END_\d+####ITALIC_START_(\d+)##", r"##ITALIC_START_\2##\1", markdown_text) + + pattern = r"(##BOLD_START_(\d+)####NEWLINE##)" + replacement = r"##NEWLINE####BOLD_START_\2##" + + markdown_text = markdown_text.replace("##NEWLINE##", "").replace("##NEWLINE##", "") + + _SWAP = re.compile(r'##NEWLINE##\s*((?:##(?:ITALIC|BOLD|U)_END_\d+##\s*)+)') + + markdown_text = _SWAP.sub(r'\1##NEWLINE## ', markdown_text) + + wrap_start = r'(?:##(?:ITALIC|BOLD)_START_\d+##)*' + wrap_end = r'(?:##(?:ITALIC|BOLD)_END_\d+##)*' + + roman_dot = r'(?i:[ivxlcdm]+)\.' 
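+    # A rough guide to the marker grammar assembled below (descriptive only):
+    # marker_core ends up matching list markers such as
+    #   (a)(b)   *   •   2.3.4   1.   (c)   (iv)   (7)   ix.   A.
+    # optionally preceded by ##INDENT## placeholders, so that a marker left
+    # alone on its own line is rejoined with the text that follows it.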
+ indent = r'(?:##INDENT##)*' + pair_paren = r'\([a-zA-Z]\)\([a-zA-Z]\)' + + marker_core = rf'(?:{pair_paren}|\*|\•|\d+\.\d[\d\.]*|\d+\.(?!\d)|\([a-zA-Z]\)|\((?i:[ivxlcdm]+)\)|\(\d+\)|{roman_dot}|[a-zA-Z]\.)' + + marker = rf'{indent}{marker_core}' + + pattern = re.compile( + rf'(?m)^({wrap_start}{marker}{wrap_end})[ \t]*\r?\n(?:[ \t]*\r?\n)*(?=\S)' + ) + + markdown_text = pattern.sub(r'\1 ', markdown_text) + + pattern = re.compile( + r'##BOLD_END_(\d+)##(?: )?' + r'##BOLD_START_\d+##' + r'(.*?)' + r'##BOLD_END_\d+##', + re.DOTALL + ) + + markdown_text = pattern.sub(r'\2##BOLD_END_\1##', markdown_text) + + pattern = re.compile( + r'##BOLD_END_(\d+)##(?: )?' + r'##SUB####BOLD_START_\d+##' + r'(.*?)' + r'##BOLD_END_\d+####/SUB##', + re.DOTALL + ) + + markdown_text = pattern.sub(r'##SUB##\2##/SUB####BOLD_END_\1##', markdown_text) + + markdown_text = re.sub(r'(##BOLD_START_\d+##•##BOLD_END_\d+##) (##BOLD_START_\d+##•##BOLD_END_\d+##)(\s?)', r'\1\n\n\2\3', markdown_text) + + markdown_text = re.sub( + r'##BOLD_START_(\d+)##\s*\(\s*##BOLD_END_\1##\s*##BOLD_START_(\d+)##(.*?)##BOLD_END_\2##', + r'##BOLD_START_\2##(\3##BOLD_END_\2##', + markdown_text + ) + + markdown_text = re.sub( + r'##BOLD_END_(\d+)##(?:##NEWLINE##\s*)?\s*##BOLD_START_\d+##(\s?(?:\)%|\)|%|,))(\s*)##BOLD_END_\d+##', + r'\2\3##BOLD_END_\1##', + markdown_text, + ) + + markdown_text = re.sub(r' (##(?:BOLD|U|ITALIC)_END_\d+##)', r'\1 ', markdown_text) + + markdown_text = re.sub(r'(?m)^•(?=\S)', '• ', markdown_text) + + markdown_text = re.sub( + r'\.##BOLD_END_(\d+)####U_START', + r'.##BOLD_END_\1## ##U_START', + markdown_text + ) + + markdown_text = re.sub(r'##ITALIC_START_(\d+)####I_SPACE####ITALIC_END_\1##', ' ', markdown_text) + markdown_text = re.sub(r'(##ITALIC_START_\d+##)(##I_SPACE##)', r'\2\1', markdown_text) + markdown_text = re.sub(r'(##I_SPACE##)(##ITALIC_END_\d+##)', r'\2\1', markdown_text) + + markdown_text = re.sub(r'(\d{1,2}\.\d+[A-Z]?\.?)(##ITALIC_START_\d+##)', r'\1 \2', markdown_text) + + markdown_text = re.sub( + r'((?:##(?:BOLD|ITALIC|U)_END_\d+##[^\S\r\n]*)+)(?=\S)', + lambda m: re.sub(r'[^\S\r\n]+', '', m.group(1)) + (' ' if re.search(r'[^\S\r\n]', m.group(1)) else ''), + markdown_text + ) + + markdown_text = re.sub( + r'(##(?:BOLD|ITALIC|U)_START_\d+##)\s', + r' \1', + markdown_text + ) + + + markdown_text = markdown_text.replace("\u00a0", " ") + mojibake = { + "â\x80\x94": "—", + "â\x80\x93": "–", + "â\x80\x99": "'", + "â\x80\x98": "'", + "â\x80\x9c": '"', + "â\x80\x9d": '"', + "â\x80¦": "...", + "â�™": "'", + "â�œ": '"', + "â�d": '"', + "â� ": '"', + "â�”": "—", + "â�“": "–", + "â�‰": " ", + "â�¦": "...", + "”": '"', + "“": '"', + "’": "'", + "‘": "'", + } + for bad, good in mojibake.items(): + markdown_text = markdown_text.replace(bad, good) + + + markdown_text = clean_phone_numbers(markdown_text) + + + junk_patterns = [ + re.compile(r'^\s*(?:<\??\s*)?xml\s+version\s*=\s*[\'"]\s*1\.0\s*[\'"].*$', re.IGNORECASE | re.MULTILINE), + re.compile( + r"^\s*\*{0,2}000\d{7}[^\n]*(?:Q[1-4]|FY|10-K|10-Q)[^\n]*false\*{0,2}\s*$", + re.IGNORECASE | re.MULTILINE, + ), + re.compile(r"^.*XBRL Document Created with.*$", re.MULTILINE), + + re.compile(r'^.*(?:Created by|Powered by|Unique Code|Generated At).*$', re.IGNORECASE | re.MULTILINE), + + re.compile(r"^false\d{4}FY\d+.*http://fasb\.org.*$", re.IGNORECASE | re.MULTILINE), + re.compile( + r"^(?!\s*\|).*\[(?:Member|Axis|Domain|Line Items|Abstract|Table|Text Block|" + r"Policy Text Block|Extensible Enumeration|Flag|Roll Forward)\].*$", + re.IGNORECASE | re.MULTILINE, + ), + ] 
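+    # What the patterns above remove, in order (descriptive only): stray XML
+    # declarations, hidden iXBRL metadata lines (a hypothetical example would
+    # be "0000123456-23-000045 Q2 false"), generator watermarks such as
+    # "XBRL Document Created with..." or "Created by"/"Powered by" lines,
+    # fact dumps pointing at http://fasb.org, and [Member]/[Axis]-style
+    # taxonomy labels that leak out of inline XBRL. Each is applied line by
+    # line in the loop below.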
+
+    for pat in junk_patterns:
+        markdown_text = pat.sub("", markdown_text)
+
+    split_fixes = {
+        r"##NEWLINE##": "<br>"
+    }
+    for bad_re, good in split_fixes.items():
+        markdown_text = re.sub(bad_re, good, markdown_text, flags=re.IGNORECASE)
+
+    markdown_text = re.sub(r'(<br>[ \t]*){2,}', '<br>', markdown_text, flags=re.IGNORECASE)
+
+    table_pattern = re.compile(r'\n---\n\n(.*?)\n\n---\n', re.S)
+
+    def remove_empty_tables_replacer(match):
+        table_content = match.group(1)
+        content_check = re.sub(r'[|\-:\s—–]', '', table_content)
+        if not content_check:
+            return ''
+        return match.group(0)
+
+    markdown_text = table_pattern.sub(remove_empty_tables_replacer, markdown_text)
+
+    markdown_text = re.sub(
+        r'(##BOLD_END_\d+##)'
+        r'((?:<br>|##NEWLINE##|##COLSPAN_\d+##|\s)*)'
+        r'(##BOLD_START_\d+##)'
+        r'(\)?%|\))'
+        r'(##BOLD_END_\d+##)',
+        r' \4\1\2',
+        markdown_text
+    )
+
+    markdown_text = re.sub(r"[ \t]+", " ", markdown_text)
+
+    markdown_text = re.sub(r"\n{3,}", "\n\n", markdown_text)
+    markdown_text = re.sub(r"(?:<br>\s*)\)", ")", markdown_text).replace("<br>%", "%").replace("$<br>", "$").replace("##BOL<br>D", "##BOLD")
+
+    link_pattern = re.compile(r'^\s*link\d+\s+".*?"\s*\n?', re.MULTILINE)
+
+    markdown_text = link_pattern.sub("", markdown_text)
+
+    markdown_text = markdown_text.replace("•<br>", "• ").replace("●<br>", "● ").replace("·<br>", "· ").replace("◦<br>", "◦ ").replace("➢<br>", "➢ ")
+
+    BULLET_RUN_RE = re.compile(
+        r'((?:^|(?:##INDENT##)+)\s*[○•●·◦➢▪])'
+        r'(?:\s*(?:<br>\s*|\n\s*)){2,}',
+        re.MULTILINE
+    )
+
+    markdown_text = BULLET_RUN_RE.sub(r'\1 ', markdown_text)
+
+    markdown_text = _fix_paragraph_bold_runs(markdown_text)
+
+    CHECK_RUN_RE = re.compile(
+        r'([✓✔✘])'
+        r'(?:\s*<br>\s*|\s*\n\s*)+'
+    )
+    markdown_text = CHECK_RUN_RE.sub(r'\1 ', markdown_text)
+
+    markdown_text = re.sub(
+        r'(##BOLD_START_\d+##TABLE OF CONTENTS##BOLD_END_\d+##)|\bTABLE OF CONTENTS\b',
+        lambda m: m.group(1) or '**TABLE OF CONTENTS**',
+        markdown_text,
+    )
+
+    markdown_text = markdown_text.replace("##SUP##", "<sup>").replace("##/SUP##", "</sup>")
+    markdown_text = markdown_text.replace("##SUB##", "<sub>").replace("##/SUB##", "</sub>")
+
+    markdown_text = re.sub(r'##BOLD_START_\d+##(?=\s*<br>##BOLD_START)', '', markdown_text)
+
+    markdown_text = re.sub(r'(##BOLD_END_\d+##)(##BOLD_START_\d+##)', r'\1 \2', markdown_text)
+
+    markdown_text = re.sub(r'(\d+)\s+(%##BOLD_END_)', r'\1\2', markdown_text)
+
+    markdown_text = re.sub(r'(##BOLD_START_\d+##)\s+', r' \1', markdown_text)
+    markdown_text = re.sub(r'\s+(##BOLD_END_\d+##)', r'\1', markdown_text)
+    markdown_text = collapse_redundant_bold_placeholders(markdown_text)
+
+    markdown_text = re.sub(r'(?<=\S)##BOLD_START_\d+##', r'**', markdown_text)
+    markdown_text = re.sub(r'##BOLD_START_\d+##', r'**', markdown_text)
+
+    markdown_text = re.sub(r'##BOLD_END_\d+##(?=\S)', r'**', markdown_text)
+    markdown_text = re.sub(r'##BOLD_END_\d+##', r'**', markdown_text)
+
+    markdown_text = re.sub(r'(##ITALIC_START_\d+##)\s+', r'\1', markdown_text)
+    markdown_text = re.sub(r'\s+(##ITALIC_END_\d+##)', r'\1', markdown_text)
+    markdown_text = re.sub(r'(?<=\S)##ITALIC_START_\d+##', r'*', markdown_text)
+    markdown_text = re.sub(r'##ITALIC_START_\d+##', r'*', markdown_text)
+    markdown_text = re.sub(r'##ITALIC_END_\d+##(?=\S)', r'*', markdown_text)
+    markdown_text = re.sub(r'##ITALIC_END_\d+##', r'*', markdown_text)
+
+    markdown_text = merge_bracket_fragmented_underlines(markdown_text)
+
+    markdown_text = re.sub(r'(##U_START_\d+##)\s+', r'\1', markdown_text)
+    markdown_text = re.sub(r'\s+(##U_END_\d+##)', r'\1', markdown_text)
+    markdown_text = re.sub(r'(?<=\S)##U_START_\d+##', r'', markdown_text)
+    markdown_text = re.sub(r'##U_START_\d+##', r'', markdown_text)
+    markdown_text = re.sub(r'##U_END_\d+##(?=\S)', r'', markdown_text)
+    markdown_text = re.sub(r'##U_END_\d+##', r'', markdown_text)
+    markdown_text = _restore_markdown_links(markdown_text)
+
+    markdown_text = markdown_text.replace("$ **", "**$").replace("**<br>**)**", ")**")
+
+    pattern = r'^(Table of Contents)\s*•\s*(.+)$'
+    replacement = r'\1\n\n• \2'
+
+    markdown_text = re.sub(pattern,
+                           replacement,
+                           markdown_text,
+                           flags=re.IGNORECASE | re.MULTILINE)
+
+    markdown_text = re.sub(r'(?:<br>)_', "_", markdown_text).replace(" _", "_")
+
+    pattern = r'Unnamed:\s*\d+(?:_level_\d+)?\s'
+    markdown_text = re.sub(pattern, '', markdown_text)
+
+    markdown_text = re.sub(r"\.\d+", "", markdown_text)
+    markdown_text = re.sub(r"<br>\.\d+(?![%\d])", "", markdown_text)
+
+    markdown_text = markdown_text.replace("", "")
+
+    if legacy_form4:
+        bad_table2_row2 = "| 1. Title of Derivative Security<br>(Instr. 3)<br>| 2. Conver-<br>sion or<br>Exercise<br>Price of<br>Deri-<br>vative<br>Security<br>| 3. Transaction Date<br>(Month/<br>Day/<br>Year)<br>| 3A. Deemed Execution Date, if any<br>(Month/<br>Day/<br>Year)<br>| Code<br>| V<br>A<br>D DE<br>ED<br>Title<br>Amount or Number of Shares<br>| 8. Price<br>of<br>Derivative<br>Security<br>(Instr.5)<br>| 9. Number of<br>Derivative<br>Securities<br>Beneficially<br>Owned<br>Following<br>Reported<br>Transaction(s)<br>(Instr.4) | 10. Owner-<br>ship<br>Form of<br>Deriv-<br>ative<br>Securities:<br>Direct (D)<br>or<br>Indirect (I)<br>(Instr.4) | 11. Nature of<br>Indirect<br>Beneficial<br>Ownership<br>(Instr.4) | | | | | |"
+        fixed_table2_row2 = "| 1. Title of Derivative Security<br>(Instr. 3)<br>| 2. Conver-<br>sion or<br>Exercise<br>Price of<br>Deri-<br>vative<br>Security<br>| 3. Transaction Date<br>(Month/<br>Day/<br>Year)<br>| 3A. Deemed Execution Date, if any<br>(Month/<br>Day/<br>Year)<br>| Code<br>| V | A | D | DE | ED | Title | Amount or Number of Shares | 8. Price<br>of<br>Derivative<br>Security<br>(Instr.5)<br>| 9. Number of<br>Derivative<br>Securities<br>Beneficially<br>Owned<br>Following<br>Reported<br>Transaction(s)<br>(Instr.4) | 10. Owner-<br>ship<br>Form of<br>Deriv-<br>ative<br>Securities:<br>Direct (D)<br>or<br>Indirect (I)<br>(Instr.4) | 11. Nature of<br>Indirect<br>Beneficial<br>Ownership<br>(Instr.4) |"
+        markdown_text = markdown_text.replace(bad_table2_row2, fixed_table2_row2)
+
+    markdown_text = _convert_bullet_tables_to_lists(markdown_text)
+
+    footnote_spacing_pattern = re.compile(
+        r'(?<=[^\n])'
+        r'\n'
+        r'(\d+.*)',
+        re.MULTILINE
+    )
+
+    markdown_text = footnote_spacing_pattern.sub(r'\n\n\1', markdown_text)
+
+    markdown_text = markdown_text.replace("**\n•", "**\n\n•").replace("** 1. ", "**\n\n1. ")
+    markdown_text = markdown_text.replace(" ##COLSPAN", "##COLSPAN").replace(" ##ROWSPAN", "##ROWSPAN")
+    markdown_text = markdown_text.replace("p>", "<br>
").replace(" ", "").replace(" ", "").replace("0.%", "0.0%").replace("ER>", "") + markdown_text = re.sub(r'SPAN##[1-9](?!\d)', 'SPAN##', markdown_text) + markdown_text = re.sub( + r'(?m)^QuickLinks(?: -- Click here to rapidly navigate through this document)?\r?\n\n', + '', + markdown_text + ) + markdown_text = _remove_page_numbers(markdown_text) + markdown_text = re.sub(r'(?m)^[ \t]*(?:100|[1-9]?\d)\s+QuickLinks[ \t]*\r?$', '', markdown_text) + markdown_text = markdown_text.replace(' " *', ' "*').replace('* " ', '*" ').replace(' (" *', ' ()"*').replace('* ") ', '*") ').replace("**(** ", "**(**") + markdown_text = markdown_text.replace("##TRIPLE_ASTERISK##", "\\*\\*\\*") + markdown_text = markdown_text.replace("##DOUBLE_ASTERISK##", "\\*\\*") + markdown_text = markdown_text.replace("##SINGLE_ASTERISK##", "\\*") + + + markdown_text = re.sub(r'(?m)^((?:[A-Za-z]\))|(?:[1-9]\d?\.))([A-Za-z])', r'\1 \2', markdown_text) + + + markdown_text = re.sub(r'(\s*\d+\s*)\s*\s+(?=[A-Za-z0-9])', r'\1 ', markdown_text, flags=re.I) + markdown_text = re.sub( + r'\s*(\((?:\d+|[A-Za-z]+))\s*\s*\s*\s*(\))\s*', + r'\1\2', + markdown_text, + flags=re.I, + ) + + pattern = r'(?m)(^[○•●·◦➢])(?=\S)' + + markdown_text = re.sub(pattern, r'\1 ', markdown_text) + markdown_text = re.sub(r'(?:\n\n------){2,}', '\n\n------', markdown_text) + + pattern = r'([○•●·◦➢])\s*\s*' + markdown_text = re.sub(pattern, r'\1 ', markdown_text) + + markdown_text = markdown_text.replace(" ", " ").replace("", "") + + markdown_text = re.sub(r"##COLSPAN_\d+##", "", markdown_text) + + pattern = r'(?m)^(\(\d+\))(?!\s)(\S)' + markdown_text = re.sub(pattern, r'\1 \2', markdown_text) + + markdown_text = apply_markdown_hardcodes(markdown_text) + + return markdown_text.strip() + +_pre_tag_re = re.compile(r'(?is).*?') + +def _extract_pre_blocks(text: str, stash, + ph_fmt="__PRE_BLOCK_{:03d}__") -> str: + """Replace every
with a unique placeholder and store the block.""" + def _sub(m): + idx = len(stash) + stash.append(m.group(0)) + return ph_fmt.format(idx) + return _pre_tag_re.sub(_sub, text) + +def parse_legacy_13f_hr_txt(raw_text: str) -> str: + """ + Router function to detect the 13F-HR text format and call the correct parser. + """ + if ("----------------" in raw_text and "NAME OF ISSUER" in raw_text) or "x x" not in raw_text: + print("--> Detected fixed-width format.") + return parse_plaintext_filing(raw_text) + else: + print("--> Detected free-form (OCR-style) format.") + return _parse_free_form_13f_hr(raw_text) + +def _parse_free_form_13f_hr(raw_text: str) -> str: + """ + Parses legacy 13F-HR filings that are in a free-form, OCR-like text format. + """ + import re, pandas as pd + + CUSIP_RE = re.compile(r'\b([0-9A-Za-z]{8}[0-9])\b') + NUM_RE = re.compile(r'(\d{1,3}(?:,\d{3})*)') + TITLE_CANDIDATES = [ + "Common Stock","Common","ADR","SPON ADR","Spon ADR","Spons ADR", + "Preferred","PRFD","Convertible","Convert","Convert Bond","ConvertBond", + "Debenture","Notes" + ] + HEADERS = [ + 'NAME OF ISSUER','TITLE OF CLASS','CUSIP','VALUE (x$1000)', + 'SHRS OR PRN AMT','SH/PRN','PUT/CALL','INVESTMENT DISCRETION','OTHER MANAGER', + 'VOTING AUTHORITY (SOLE)','VOTING AUTHORITY (SHARED)','VOTING AUTHORITY (NONE)' + ] + + def smart_title_case(text: str) -> str: + words = text.split() + title_cased_words = [] + for word in words: + if word.isupper() and len(word) > 1: + title_cased_words.append(word) + else: + title_cased_words.append(word.capitalize()) + return ' '.join(title_cased_words) + + def _normalize_number_token(s: str) -> str: + return s.replace(',', '') + + def _pick_title(text: str) -> str: + up = text.upper() + best = None + for t in TITLE_CANDIDATES: + if t.upper() in up: + if not best or up.rfind(t.upper()) > up.rfind(best.upper()): + best = t + return best or "Common" + + table_start = re.search(r'Form 13F Information Table', raw_text, re.I | re.S) + if not table_start: + return "" + + cover = raw_text[:table_start.start()] + cover = "\n".join(l.strip() for l in cover.splitlines() if l.strip()) + cover = re.sub(r'FORM 13F\s+FORM 13F', '## FORM 13F (Some records may not parsed due to OCR errors in the original filing)', cover, flags=re.I) + cover = re.sub(r'COVER PAGE', '\n\n### COVER PAGE', cover, flags=re.I) + cover = re.sub(r'SUMMARY PAGE', '\n\n### SUMMARY PAGE', cover, flags=re.I) + cover = re.sub(r'Name:', '\n\n**Name:**', cover, flags=re.I) + cover = re.sub(r'Address:', '\n\n**Address:**', cover, flags=re.I) + cover = re.sub(r'Person signing this report on behalf of Reporting Manager:', + '\n\n**Person signing this report on behalf of Reporting Manager:**', + cover, flags=re.I) + + blob = raw_text[table_start.end():] + blob = re.sub(r'|', ' ', blob, flags=re.I) + blob = re.sub(r'\s+', ' ', blob).strip() + blob = re.sub(r'(\d),\s+(\d{3})', r'\1,\2', blob) + blob = re.sub(r'(\d{1,3}),\s+(\d)\s+(\d{3}\b)', r'\1 \2,\3', blob) + + matches = list(CUSIP_RE.finditer(blob)) + holdings = [] + for i, m in enumerate(matches): + start = m.start() + end = matches[i+1].start() if i+1 < len(matches) else len(blob) + rec = blob[start:end].strip() + + cusip = m.group(1).upper() + rest = rec[len(m.group(1)):].strip() + + nums = NUM_RE.findall(rest) + value_tok = nums[0] if len(nums) >= 1 else '0' + shares_tok = nums[1] if len(nums) >= 2 else '0' + + first_num_pos = rest.find(value_tok) if nums else -1 + + issuer_title_text = rest + if first_num_pos != -1: + issuer_title_text = rest[:first_num_pos].strip() + 
else:
+                if ' x' in rest.lower():
+                    issuer_title_text = rest.lower().split(' x')[0].strip()
+
+            value_raw = _normalize_number_token(value_tok)
+            shares_raw = _normalize_number_token(shares_tok)
+
+            title = _pick_title(issuer_title_text)
+            issuer_text = re.sub(re.escape(title) + r'$', '', issuer_title_text, flags=re.I).strip()
+            issuer_text = re.sub(r'\bSpons?\b\s*A(?:DR)?\b$', '', issuer_text, flags=re.I).strip()
+            issuer_text = re.sub(r'\s{2,}', ' ', issuer_text)
+
+            issuer = smart_title_case(issuer_text)
+
+            x_count = len(re.findall(r'\bx\b', rec, flags=re.I))
+            inv_disc = "SOLE" if x_count >= 1 else "—"
+            voting_sole = int(shares_raw) if (x_count >= 1 and shares_raw.isdigit() and int(shares_raw) > 0) else 0
+
+            is_prn = any(k in title.upper() for k in ("CONVERT", "DEBENT", "NOTES"))
+            row = {
+                'NAME OF ISSUER': issuer or '—',
+                'TITLE OF CLASS': 'ConvertBond' if 'CONVERT' in title.upper() else (title or 'Common'),
+                'CUSIP': cusip,
+                'VALUE (x$1000)': f"{int(value_raw):,}" if value_raw.isdigit() and int(value_raw) > 0 else '—',
+                'SHRS OR PRN AMT': f"{int(shares_raw):,}" if shares_raw.isdigit() and int(shares_raw) > 0 else '—',
+                'SH/PRN': 'PRN' if is_prn else 'SH',
+                'PUT/CALL': '—',
+                'INVESTMENT DISCRETION': inv_disc,
+                'OTHER MANAGER': '—',
+                'VOTING AUTHORITY (SOLE)': f"{voting_sole:,}" if voting_sole > 0 else '—',
+                'VOTING AUTHORITY (SHARED)': '—',
+                'VOTING AUTHORITY (NONE)': '—',
+            }
+            holdings.append(row)
+
+    if not holdings:
+        return f"{cover}\n\n"
+
+    df = pd.DataFrame(holdings).reindex(columns=HEADERS).fillna('—')
+    return f"{cover}\n\n{to_compact_markdown(df, index=False)}"
+
+def _process_image_bytes_with_mistral(
+    image_bytes: bytes,
+    file_name: str,
+    page_no: int,
+    *,
+    per_table_sleep_s: float,
+    mistral_api_key: Optional[str] = None,
+):
+    """
+    Processes a single rendered page image using Mistral OCR.
+    This is an adaptation of the PDF processing logic for a single image.
+    """
+    try:
+        def _run_page_ocr(*, client: Mistral, api_key: str, key_spec: Dict[str, Any]):
+            up = client.files.upload(file={"file_name": f"page_{page_no}_{file_name}", "content": image_bytes}, purpose="ocr")
+            if not up or not up.id:
+                raise Exception("Image upload failed to return a valid ID.")
+
+            signed_url = get_signed_url_with_retry(client, file_id=up.id)
+            headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+            payload = _build_mistral_ocr_payload(signed_url)
+            max_ocr_retries = 4
+            ocr_delay = 2.0
+
+            for attempt in range(max_ocr_retries):
+                response = requests.post(OCR_API_URL, headers=headers, json=payload, timeout=600)
+
+                if response.status_code in [429, 500, 502, 503, 504]:
+                    if attempt < max_ocr_retries - 1:
+                        print(f"OCR API error {response.status_code}. Retrying in {ocr_delay:.2f}s... 
(Attempt {attempt + 1}/{max_ocr_retries})") + time.sleep(ocr_delay) + ocr_delay *= 2 + ocr_delay += random.uniform(0, 0.5) + continue + + response.raise_for_status() + break + + ocr_data = response.json() + usage = _summarize_ocr_usage(ocr_data, response.headers) + if not ocr_data.get("pages"): + print(f"[page {page_no}] OCR returned no content.") + return "", usage, key_spec["env_name"] + + page_obj = ocr_data["pages"][0] + return _pick_text(page_obj), usage, key_spec["env_name"] + + page_text, usage, used_env_name = _run_with_mistral_key_rotation( + f"rendered page {page_no} for {file_name}", + _run_page_ocr, + explicit_api_key=mistral_api_key, + ) + _record_mistral_key_success(used_env_name, usage=usage, explicit_api_key=mistral_api_key) + return page_text + + except Exception as e: + print(f"API processing for page {page_no} failed and was skipped. Error: {e}") + return f"" + +def parse_html_via_ocr(filepath: pathlib.Path) -> str: + """ + High-quality OCR-based parser for positioned HTML. Renders each page to an + image and uses Mistral OCR to extract text and tables. + """ + _load_sec_parser_env() + if not _has_mistral_api_keys(): + print(f"{_mistral_no_keys_message()} Skipping OCR processing.") + return "" + + _log_current_filing_ocr("html_image_ocr") + html_content = filepath.read_text(encoding='utf-8', errors='replace') + try: + soup = BeautifulSoup(html_content, "lxml") + except ValueError as e: + if "not enough values to unpack" in str(e): + print(f"[Warning] lxml parser crashed on malformed attributes. Falling back to html.parser.") + soup = BeautifulSoup(html_content, "html.parser") + else: + raise + + pages = soup.find_all('div', id=re.compile(r'^pf\w+$')) + if not pages: + print("Could not find page containers (e.g.,
+def parse_html_via_ocr(filepath: pathlib.Path) -> str:
+    """
+    High-quality OCR-based parser for positioned HTML. Renders each page to an
+    image and uses Mistral OCR to extract text and tables.
+    """
+    _load_sec_parser_env()
+    if not _has_mistral_api_keys():
+        print(f"{_mistral_no_keys_message()} Skipping OCR processing.")
+        return ""
+
+    _log_current_filing_ocr("html_image_ocr")
+    html_content = filepath.read_text(encoding='utf-8', errors='replace')
+    try:
+        soup = BeautifulSoup(html_content, "lxml")
+    except ValueError as e:
+        if "not enough values to unpack" in str(e):
+            print("[Warning] lxml parser crashed on malformed attributes. Falling back to html.parser.")
+            soup = BeautifulSoup(html_content, "html.parser")
+        else:
+            raise
+
+    pages = soup.find_all('div', id=re.compile(r'^pf\w+$'))
+    if not pages:
+        print('Could not find page containers (e.g., <div id="pf1">). Treating document as a single page.')
+        pages = [soup]
+
+    md_parts = []
+
+    options = {
+        'format': 'png',
+        'quality': '100',
+        'width': '1224',
+        'disable-smart-width': ''
+    }
+
+    print(f"Found {len(pages)} pages to process via OCR.")
+    for i, page_soup in enumerate(pages, 1):
+        print(f"--> Rendering and processing page {i}...")
+        try:
+            image_bytes = imgkit.from_string(str(page_soup), False, options=options)
+
+            page_content = _process_image_bytes_with_mistral(
+                image_bytes=image_bytes,
+                file_name=filepath.name,
+                page_no=i,
+                per_table_sleep_s=Config.PER_TABLE_SLEEP_SECONDS,
+                mistral_api_key=None,
+            )
+            md_parts.append(page_content)
+
+        except Exception as e:
+            error_msg = f"Failed to render or process page {i}: {e}"
+            print(error_msg)
+            md_parts.append(f"<!-- {error_msg} -->")
+
+    return "\n\n------\n\n".join(md_parts)
+
+def parse_series_and_classes_sgml(header_content: str) -> str:
+    """
+    Parses the SGML <SERIES-AND-CLASSES-CONTRACTS-DATA> block from a
+    filing header into a structured Markdown output.
+    """
+    sgml_match = re.search(
+        r"<SERIES-AND-CLASSES-CONTRACTS-DATA>(.*?)</SERIES-AND-CLASSES-CONTRACTS-DATA>",
+        header_content,
+        re.S | re.I
+    )
+
+    if not sgml_match:
+        return ""
+
+    sgml_content = sgml_match.group(1)
+    md_parts = ["## Series and Classes Contracts Data"]
+
+    series_blocks = re.split(r'<SERIES>', sgml_content, flags=re.I)[1:]
+    if not series_blocks:
+        return ""
+
+    for series_block in series_blocks:
+        series_name_match = re.search(r'<SERIES-NAME>\s*([^\n<]+)', series_block, re.I)
+        series_id_match = re.search(r'<SERIES-ID>\s*([^\n<]+)', series_block, re.I)
+
+        series_name = series_name_match.group(1).strip() if series_name_match else "—"
+        series_id = series_id_match.group(1).strip() if series_id_match else "—"
+
+        md_parts.append(f"\n### {series_name} (Series ID: {series_id})")
+
+        class_records = []
+        class_contract_blocks = re.findall(
+            r'<CLASS-CONTRACT>(.*?)(?=<CLASS-CONTRACT>|</SERIES>|$)',
+            series_block,
+            re.S | re.I
+        )
+
+        for class_block in class_contract_blocks:
+            id_match = re.search(r'<CLASS-CONTRACT-ID>\s*([^\n<]+)', class_block, re.I)
+            name_match = re.search(r'<CLASS-CONTRACT-NAME>\s*([^\n<]+)', class_block, re.I)
+            ticker_match = re.search(r'<CLASS-CONTRACT-TICKER-SYMBOL>\s*([^\n<]+)', class_block, re.I)
+
+            record = {
+                'Class ID': id_match.group(1).strip() if id_match else "—",
+                'Class Name': name_match.group(1).strip() if name_match else "—",
+                'Ticker Symbol': ticker_match.group(1).strip() if ticker_match else "—",
+            }
+            class_records.append(record)
+
+        if class_records:
+            df = pd.DataFrame(class_records)
+            md_parts.append(to_compact_markdown(df, index=False))
+
+    return "\n\n".join(md_parts)
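+# --- Illustrative sketch (editor's addition, not part of the parser). The
+# sample block and expected output below are hypothetical; tag names follow
+# the EDGAR header convention assumed in parse_series_and_classes_sgml().
+_EXAMPLE_SERIES_SGML = """\
+<SERIES-AND-CLASSES-CONTRACTS-DATA>
+<SERIES>
+<SERIES-ID>S000001234
+<SERIES-NAME>Example Growth Fund
+<CLASS-CONTRACT>
+<CLASS-CONTRACT-ID>C000005678
+<CLASS-CONTRACT-NAME>Class A
+<CLASS-CONTRACT-TICKER-SYMBOL>EXGFX
+</CLASS-CONTRACT>
+</SERIES>
+</SERIES-AND-CLASSES-CONTRACTS-DATA>
+"""
+# parse_series_and_classes_sgml(_EXAMPLE_SERIES_SGML) yields a
+# "## Series and Classes Contracts Data" heading, one
+# "### Example Growth Fund (Series ID: S000001234)" section, and a compact
+# Markdown table with Class ID / Class Name / Ticker Symbol columns.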
+def parse_legacy_paper_filing(raw_text: str, form_type: str) -> str:
+    """
+    Parses various legacy plain-text paper filings (ADV series, MSDW, etc.).
+    All data is contained within the <SEC-HEADER> or <IMS-HEADER> block, and
+    it also captures any text from the <TEXT> block.
+    """
+    form_titles = {
+        "ADV": "FORM ADV: Uniform Application for Investment Adviser Registration",
+        "ADV/A": "FORM ADV/A: Amendment to Form ADV",
+        "ADV-E": "FORM ADV-E: Certificate of Accounting of Client Securities and Funds",
+        "ADV-H-T": "FORM ADV-H-T: Application for a Temporary Hardship Exemption",
+        "ADV-H-C": "FORM ADV-H-C: Application for a Continuing Hardship Exemption",
+        "ADV-NR": "FORM ADV-NR: Appointment of Agent for Service of Process by Non-Resident Adviser",
+        "ADVW": "FORM ADVW: Notice of Withdrawal from Registration as Investment Adviser",
+        "ADVCO": "FORM ADVCO: Correction to an ADV Filing",
+        "MSDW": "FORM MSDW: Notice of Withdrawal from Registration as a Municipal Securities Dealer"
+    }
+    title = form_titles.get(form_type, f"Form {form_type}")
+
+    md_parts = [
+        "### UNITED STATES SECURITIES AND EXCHANGE COMMISSION\n"
+        "**Washington, D.C. 20549**\n\n"
+        f"## {title}\n"
+    ]
+
+    header_match = re.search(r"<(?:SEC|IMS)-HEADER>(.*?)</(?:SEC|IMS)-HEADER>", raw_text, re.S | re.I)
+    if not header_match:
+        md_parts.append("")
+    else:
+        header_content = header_match.group(1).strip()
+        for raw_line in header_content.splitlines():
+            line = raw_line.strip()
+            if not line:
+                continue
+
+            # Check indentation on the raw line; stripping would erase the tab.
+            if not raw_line.startswith('\t') and line.endswith(':'):
+                section_name = line.rstrip(':').strip()
+                if section_name in ["FILER", "COMPANY DATA", "FILING VALUES", "BUSINESS ADDRESS", "MAIL ADDRESS", "FORMER COMPANY"]:
+                    md_parts.append(f"\n### {section_name.title()}")
+                continue
+
+            if ':' in line:
+                key, value = line.split(':', 1)
+                key = key.strip()
+                value = value.strip()
+                if key and value:
+                    md_parts.append(f"**{key}:** {value}")
+
+    doc_match = re.search(r"<DOCUMENT>(.*?)</DOCUMENT>", raw_text, re.S | re.I)
+    if doc_match:
+        doc_content = doc_match.group(1)
+        text_match = re.search(r"<TEXT>(.*?)</TEXT>", doc_content, re.S | re.I)
+        if text_match:
+            doc_text = text_match.group(1).strip()
+            if doc_text:
+                md_parts.append("\n### Document Note")
+                blockquote_lines = [f"> {line.strip()}" for line in doc_text.splitlines() if line.strip()]
+                md_parts.append("\n".join(blockquote_lines))
+
+    return "\n".join(md_parts)
+
+def _extract_class_name_map_from_header_content(header_content: str) -> dict:
+    if not header_content:
+        return {}
+    try:
+        header_soup = BeautifulSoup(header_content, 'lxml')
+    except ValueError as e:
+        if "not enough values to unpack" in str(e):
+            print("[Warning] lxml crashed while parsing header contract data. Falling back to html.parser.")
+            header_soup = BeautifulSoup(header_content, 'html.parser')
+        else:
+            raise
+
+    class_name_map = {}
+    if (scd := header_soup.find('series-and-classes-contracts-data')):
+        for series in scd.find_all('series'):
+            for class_contract in series.find_all('class-contract'):
+                class_id_tag = class_contract.find('class-contract-id')
+                class_name_tag = class_contract.find('class-contract-name')
+                if class_id_tag and class_name_tag:
+                    id_text_node = class_id_tag.find(string=True, recursive=False)
+                    name_text_node = class_name_tag.find(string=True, recursive=False)
+                    if id_text_node and name_text_node:
+                        class_id = id_text_node.strip()
+                        class_name = name_text_node.strip()
+                        if class_id:
+                            class_name_map[class_id] = class_name
+    return class_name_map
+
+def _iter_document_blocks(raw_bytes: bytes):
+    for match in re.finditer(rb"<DOCUMENT>(.*?)</DOCUMENT>", raw_bytes, re.S | re.I):
+        yield match.group(1)
+
+def _iter_document_blocks_from_file(filepath: pathlib.Path, chunk_size: int = 8 * 1024 * 1024):
+    start_re = re.compile(rb"<DOCUMENT>", re.I)
+    end_re = re.compile(rb"</DOCUMENT>", re.I)
+    overlap = 64
+    buffer = b""
+    in_document = False
+    doc_parts: List[bytes] = []
+
+    with filepath.open("rb") as handle:
+        while True:
+            chunk = handle.read(chunk_size)
+            if not chunk:
+                break
+            buffer += chunk
+            while True:
+                if not in_document:
+                    start_match = start_re.search(buffer)
+                    if not start_match:
+                        buffer = buffer[-overlap:]
+                        break
+                    buffer = buffer[start_match.end():]
+                    doc_parts = []
+                    in_document = True
+
+                end_match = end_re.search(buffer)
+                if end_match:
+                    doc_parts.append(buffer[:end_match.start()])
+                    yield b"".join(doc_parts)
+                    buffer = buffer[end_match.end():]
+                    doc_parts = []
+                    in_document = False
+                    continue
+
+                if len(buffer) > overlap:
+                    doc_parts.append(buffer[:-overlap])
+                    buffer = buffer[-overlap:]
+                break
+
+def _first_document_block_from_file(filepath: pathlib.Path) -> Optional[bytes]:
+    return next(_iter_document_blocks_from_file(filepath), None)
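+# --- Illustrative sketch (editor's addition, not part of the parser) -------
+# _iter_document_blocks_from_file() streams multi-gigabyte filings in 8 MiB
+# chunks; the 64-byte overlap kept at each buffer edge prevents a
+# "</DOCUMENT>" tag that straddles two reads from being missed. For bytes
+# already in memory, the non-streaming variant yields the same blocks. The
+# demo function below is hypothetical.
+def _demo_document_blocks() -> List[bytes]:
+    raw = b"<DOCUMENT>first block</DOCUMENT>\n<DOCUMENT>second block</DOCUMENT>"
+    # Returns [b"first block", b"second block"].
+    return list(_iter_document_blocks(raw))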
+def _read_prefix_until_any(
+    filepath: pathlib.Path,
+    markers: List[bytes],
+    chunk_size: int = 1024 * 1024,
+    max_bytes: int = 64 * 1024 * 1024,
+) -> bytes:
+    buffer = b""
+    marker_res = [re.compile(re.escape(marker), re.I) for marker in markers if marker]
+    with filepath.open("rb") as handle:
+        while True:
+            chunk = handle.read(chunk_size)
+            if not chunk:
+                return buffer
+            buffer += chunk
+            earliest_end: Optional[int] = None
+            for marker_re in marker_res:
+                match = marker_re.search(buffer)
+                if match and (earliest_end is None or match.end() < earliest_end):
+                    earliest_end = match.end()
+            if earliest_end is not None:
+                return buffer[:earliest_end]
+            if len(buffer) >= max_bytes:
+                return buffer
+
+def _file_contains_bytes(filepath: pathlib.Path, needle: bytes, chunk_size: int = 4 * 1024 * 1024) -> bool:
+    if not needle:
+        return False
+    needle_lower = needle.lower()
+    overlap = max(0, len(needle) - 1)
+    tail = b""
+    with filepath.open("rb") as handle:
+        while True:
+            chunk = handle.read(chunk_size)
+            if not chunk:
+                return False
+            haystack = tail + chunk
+            if needle_lower in haystack.lower():
+                return True
+            tail = haystack[-overlap:] if overlap else b""
+
+def _extract_xml_blobs_from_body_bytes(body_bytes: bytes) -> List[str]:
+    return [
+        normalize_text_markup(match.group(1))
+        for match in re.finditer(rb"<XML>(.*?)</XML>", body_bytes, re.S | re.I)
+    ]
+
+def _body_bytes_without_xml(body_bytes: bytes) -> bytes:
+    if not re.search(rb"<XML>", body_bytes, re.I):
+        return body_bytes
+    return re.sub(rb"<XML>.*?</XML>", b"", body_bytes, flags=re.S | re.I)
+
+def _iter_pdf_attachment_texts_from_file(
+    filepath: pathlib.Path,
+    skip_types: set[bytes],
+    xbrl_ex_re: re.Pattern,
+):
+    for doc_bytes in _iter_document_blocks_from_file(filepath):
+        m = re.search(rb"<TYPE>\s*([^\s<]+)", doc_bytes, re.I)
+        doc_type_bytes = m.group(1).upper() if m else b""
+        if doc_type_bytes in skip_types or xbrl_ex_re.match(doc_type_bytes):
+            continue
+        text_match = re.search(rb"<TEXT>(.*?)</TEXT>", doc_bytes, re.S | re.I)
+        body_bytes = text_match.group(1) if text_match else doc_bytes
+        if not body_bytes.strip():
+            desc_match = re.search(rb"<DESCRIPTION>\s*(.*?)\s*<", doc_bytes, re.S | re.I)
+            if desc_match:
+                body_bytes = desc_match.group(1)
+        if re.search(rb"<PDF>", body_bytes, re.I):
+            yield body_bytes.decode('latin-1', errors='replace')
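+# --- Illustrative sketch (editor's addition, not part of the parser) -------
+# _file_contains_bytes() lower-cases both the needle and each overlapping
+# window, so tag probes like b"<PDF>" match case-insensitively without
+# loading the whole filing. The demo function below is hypothetical.
+def _demo_file_contains_bytes() -> bool:
+    import tempfile
+    with tempfile.NamedTemporaryFile(delete=False) as fh:
+        fh.write(b"<SEC-DOCUMENT>...\n<pdf>\n%PDF-1.4 ...")
+        tmp_name = fh.name
+    try:
+        # True: b"<PDF>" matches the lower-case b"<pdf>" in the file.
+        return _file_contains_bytes(pathlib.Path(tmp_name), b"<PDF>")
+    finally:
+        os.unlink(tmp_name)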
+ """ + global LAST_PARSE_STATS + parse_stats = _new_parse_stats(filepath) + + def record(source_format: str, text: str, label: str = "") -> None: + _record_parse_stats_part(parse_stats, source_format, text, label) + + def record_pdf(text: str, label: str, page_count: int) -> None: + _record_parse_stats_part(parse_stats, "pdf", text, label) + _record_parse_stats_pdf_pages(parse_stats, page_count) + + def finish(markdown_text: str, form_type: str = "") -> str: + global LAST_PARSE_STATS + final_text = (markdown_text or "").strip() + LAST_PARSE_STATS = _finalize_parse_stats(parse_stats, final_text, form_type or main_form_type) + return final_text + + prefix_bytes = _read_prefix_until_any( + filepath, + [b"", b"", b""], + ) + first_doc_bytes = _first_document_block_from_file(filepath) + + main_form_type = "" + legacy_paper_forms = {"MSDW", "MSDCO", "MSD", "MSD/A", "8-M", "9-M"} + + class_name_map = {} + def is_html_content(content_str): + return re.search(r"<\s*(html|div|p)\b", content_str, re.I) + + header_match_bytes = re.search(rb"(.*?)", prefix_bytes, re.S | re.I) + + ims_header_match_bytes = re.search(rb"(.*?)", prefix_bytes, re.S | re.I) + + header_part = "" + sgml_header_part = "" + + if header_match_bytes: + header_bytes = header_match_bytes.group(1) + header_content = normalize_text_markup(header_bytes) + class_name_map = _extract_class_name_map_from_header_content(header_content) + + sgml_header_part = parse_series_and_classes_sgml(header_content) + + if (m := re.search(r"CONFORMED SUBMISSION TYPE:\s*([^\s]+)", header_content, re.I)): + main_form_type = m.group(1).strip().upper() + + if (main_form_type.startswith("ADV") or main_form_type in legacy_paper_forms) and _file_contains_bytes(filepath, b""): + print(f"--> Detected legacy paper filing: {main_form_type}. Routing to dedicated paper parser.") + full_text_decoded = filepath.read_bytes().decode('latin-1', 'replace') + legacy_md = parse_legacy_paper_filing(full_text_decoded, main_form_type) + record("text", legacy_md, "legacy_paper") + return finish(legacy_md, main_form_type) + + if main_form_type in ("497", "24F-2NT"): + header_part = parse_form497_file(header_content) + else: + header_part = parse_sec_header(header_content) + record("sgml", header_part, "sec_header") + record("sgml", sgml_header_part, "series_classes_sgml") + + elif ims_header_match_bytes: + full_text_decoded = filepath.read_bytes().decode('latin-1', 'replace') + header_part = parse_ims_header(full_text_decoded) + + if (m := re.search(r"CONFORMED SUBMISSION TYPE:\s*([^\s]+)", full_text_decoded, re.I)): + main_form_type = m.group(1).strip().upper() + + if (main_form_type.startswith("ADV") or main_form_type in legacy_paper_forms) and _file_contains_bytes(filepath, b""): + print(f"--> Detected legacy paper filing: {main_form_type}. 
Routing to dedicated paper parser.") + legacy_md = parse_legacy_paper_filing(full_text_decoded, main_form_type) + record("text", legacy_md, "legacy_paper") + return finish(legacy_md, main_form_type) + record("sgml", header_part, "ims_header") + else: + header_part = "" + record("sgml", header_part, "missing_header_placeholder") + + if first_doc_bytes is None: + raw_bytes = filepath.read_bytes() + body_content_bytes_match = re.search(rb"(.*)", raw_bytes, re.S | re.I) + if not body_content_bytes_match: + header_end = header_match_bytes.end() if header_match_bytes else 0 + body_content_bytes_match = raw_bytes[header_end:] + else: + body_content_bytes_match = body_content_bytes_match.group(1) + + if re.search(rb"", body_content_bytes_match, re.I): + pdf_md, pdf_page_count = parse_pdf_attachments([body_content_bytes_match.decode('latin-1', 'replace')]) + record_pdf(pdf_md, "embedded_pdf", pdf_page_count) + return finish(f"{header_part}\n\n{pdf_md}", main_form_type) + + body_content = normalize_text_markup(body_content_bytes_match) + if is_html_content(body_content): + md, positioned = parse_html_filing(body_content, form_type="", file_path=filepath) + if positioned: + body_md = md + record_pdf(body_md, "positioned_html_ocr", LAST_POSITIONED_HTML_OCR_PAGE_COUNT) + else: + body_md = _post_process_text_cleanup(md) + record("html", body_md, "body_html") + else: + body_md = parse_plaintext_filing(body_content) + record("text", body_md, "body_text") + return finish(f"{header_part}\n\n{body_md}", main_form_type) + + parts: List[str] = [header_part] if header_part else [] + if sgml_header_part: + parts.append(sgml_header_part) + pre_stash: List[str] = [] + xml_blobs: List[str] = [] + pdf_blobs: List[str] = [] + + if main_form_type.startswith(('13F-', 'N-PX')): + all_xml_contents = [] + legacy_text_content = "" + for doc_bytes in _iter_document_blocks_from_file(filepath): + text_match = re.search(rb"(.*?)", doc_bytes, re.S | re.I) + body_bytes = text_match.group(1) if text_match else doc_bytes + if not body_bytes.strip(): continue + xmls_in_doc = _extract_xml_blobs_from_body_bytes(body_bytes) + if xmls_in_doc: + all_xml_contents.extend(xmls_in_doc) + else: + doc_content = normalize_text_markup(body_bytes) + if doc_content.strip() and "13F" in main_form_type: + legacy_text_content = doc_content + break + + if all_xml_contents: + parts = [header_part] if header_part else [] + xml_md = parse_any_xml(all_xml_contents) + + + record("xml", xml_md, "13f_npx_xml") + parts.append(xml_md) + final_md = "\n\n".join(p for p in parts if p.strip()) + return finish(final_md, main_form_type) + + elif legacy_text_content: + legacy_md = parse_legacy_13f_hr_txt(legacy_text_content) + record("text", legacy_md, "legacy_13f_text") + return finish(f"{header_part}\n\n{legacy_md}", main_form_type) + + skip_types = {b"EXCEL", b"XML", b"XBRLSUMMARY", b"JSON", b"ZIP", b"PAPER", b"GRAPHIC"} + xbrl_ex_re = re.compile(rb"^EX-101\.(INS|SCH|CAL|DEF|LAB|PRE)$", re.I) + if not main_form_type and first_doc_bytes is not None: + if (m := re.search(rb"\s*([^\s<]+)", first_doc_bytes, re.I)): + main_form_type = m.group(1).upper().decode('ascii', 'ignore') + + saw_pdf_blobs = False + for idx, doc_bytes in enumerate(_iter_document_blocks_from_file(filepath), start=1): + _debug_print(f"Document {idx} is being processed") + + m = re.search(rb"\s*([^\s<]+)", doc_bytes, re.I) + doc_type_bytes = m.group(1).upper() if m else b"" + doc_type = doc_type_bytes.decode('ascii', 'ignore') + + if doc_type_bytes in skip_types or 
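+    # Main pass: walk every <DOCUMENT> block in filing order. Each block is
+    # classified by its <TYPE> tag, skipped when it is a machine-readable
+    # sidecar (XBRL instance parts, spreadsheets, graphics), and otherwise
+    # routed to the NSAR / HTML / plaintext sub-parsers; PDF and <XML> blobs
+    # are stashed here and appended after the loop.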
+    for idx, doc_bytes in enumerate(_iter_document_blocks_from_file(filepath), start=1):
+        _debug_print(f"Document {idx} is being processed")
+
+        m = re.search(rb"<TYPE>\s*([^\s<]+)", doc_bytes, re.I)
+        doc_type_bytes = m.group(1).upper() if m else b""
+        doc_type = doc_type_bytes.decode('ascii', 'ignore')
+
+        if doc_type_bytes in skip_types or xbrl_ex_re.match(doc_type_bytes):
+            continue
+
+        text_match = re.search(rb"<TEXT>(.*?)</TEXT>", doc_bytes, re.S | re.I)
+        body_bytes = text_match.group(1) if text_match else doc_bytes
+
+        if not body_bytes.strip():
+            desc_match = re.search(rb"<DESCRIPTION>\s*(.*?)\s*<", doc_bytes, re.S | re.I)
+            if desc_match:
+                body_bytes = desc_match.group(1)
+
+        if re.search(rb"<PDF>", body_bytes, re.I):
+            saw_pdf_blobs = True
+            continue
+
+        xmls_in_doc = _extract_xml_blobs_from_body_bytes(body_bytes)
+        if xmls_in_doc:
+            xml_blobs.extend(xmls_in_doc)
+            doc_content = normalize_text_markup(_body_bytes_without_xml(body_bytes))
+        else:
+            doc_content = normalize_text_markup(body_bytes)
+
+        body_wo_xml = doc_content
+        body_wo_xml = _extract_pre_blocks(body_wo_xml, pre_stash)
+
+        is_legacy_form4_doc = (not xmls_in_doc) and (doc_type == "4")
+        parsed_part = ""
+
+        if doc_type.startswith("NSAR-B") and not is_html_content(body_wo_xml):
+            parsed_part = parse_nsar_b_txt(body_wo_xml)
+            parsed_source_format = "text"
+        elif is_html_content(body_wo_xml) or xmls_in_doc:
+            html_part, positioned = parse_html_filing(body_wo_xml, form_type=main_form_type, file_path=filepath)
+            if positioned:
+                parsed_part = html_part
+                parsed_source_format = "pdf"
+            else:
+                parsed_part = _post_process_text_cleanup(html_part, legacy_form4=is_legacy_form4_doc)
+                parsed_source_format = "html"
+        else:
+            parsed_part = parse_plaintext_filing(body_wo_xml)
+            parsed_source_format = "text"
+
+        if parsed_part.strip():
+            if parsed_source_format == "pdf":
+                record_pdf(parsed_part, doc_type or "positioned_html_ocr", LAST_POSITIONED_HTML_OCR_PAGE_COUNT)
+            else:
+                record(parsed_source_format, parsed_part, doc_type or parsed_source_format)
+            if (ex := re.match(r"EX[-\s]?(\d+\.\d+)", doc_type, re.I)) and not xmls_in_doc:
+                parts.append(f"\n## Exhibit {ex.group(1)}\n")
+            elif doc_type and doc_type not in {main_form_type, ""} and not xmls_in_doc:
+                desc_match = re.search(r"<DESCRIPTION>\s*(.*?)\s*<", doc_content, re.I)
+                parts.append(f"\n## {(desc_match.group(1) if desc_match else doc_type).title()}\n")
+            parts.append(parsed_part)
+
+    if saw_pdf_blobs:
+        pdf_md, pdf_page_count = parse_pdf_attachments(
+            _iter_pdf_attachment_texts_from_file(filepath, skip_types, xbrl_ex_re)
+        )
+        record_pdf(pdf_md, "pdf_attachments", pdf_page_count)
+        parts.append(pdf_md)
+    if xml_blobs:
+        xml_md = parse_any_xml(xml_blobs, pdf_docs=None, class_name_map=class_name_map)
+        record("xml", xml_md, "xml_documents")
+        parts.append(xml_md)
+
+    final_md = "\n\n".join(p for p in parts if p.strip())
+
+    rendered_pre = [parse_plaintext_filing(b) for b in pre_stash]
+    if rendered_pre:
+        record("text", "\n\n".join(rendered_pre), "pre_blocks")
+        for i, block in enumerate(rendered_pre):
+            final_md = final_md.replace(f"__PRE_BLOCK_{i:03d}__", block)
+
+    return finish(final_md, main_form_type)
+
+def conditional_delete(match_object):
+    """
+    This function is called for every match.
+    It checks the length of the matched 'begin 644 ' line.
+    """
+    captured_content = match_object.group(1)
+
+    if len(captured_content) > 50:
+        return match_object.group(0)
+    else:
+        return ""
+
+def main_one(path: pathlib.Path, to_mmd: bool = False, source_document_url: Optional[str] = None) -> None:
+
+    global CURRENT_PROCESSING_FILE, CURRENT_SOURCE_DOCUMENT_URL
+    CURRENT_PROCESSING_FILE = str(path.resolve())
+    CURRENT_SOURCE_DOCUMENT_URL = (source_document_url or '').strip() or None
+    try:
+        doc = process_local_xbrl(path)
+        if not doc:
+            raise ValueError("empty output")
+
+        doc = doc.replace('| — |', '| |').replace('| — |', '| |').replace('|<br> — |', '| |').replace('|<br> — |', '| |').replace('|<br> - |', '| |').replace('|<br> - |', '| |')
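+        # The rest of this block scrubs rendering artifacts out of the
+        # assembled Markdown: stray <br> fragments, placeholder tokens
+        # (##MD_NEWLINE##, ##INDENT##, ##SPACE##), empty em-dash table cells,
+        # uuencoded "begin 644" payloads, and spacing glitches around %, $,
+        # and bold markers.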
+        before_pattern = re.compile(r'(?---\n", "\n---\n")
+
+        delimiter_fix_pattern = re.compile(r'(^---)([ \t]*[^-].*)', re.MULTILINE)
+        doc = delimiter_fix_pattern.sub(r'\1\n\n\2', doc)
+
+        if not to_mmd:
+            doc = re.sub(r'(?<=[^\s-])<br>', ' ', doc)
+            doc = doc.replace('', r'\|')
+            doc = doc.replace('##MD_NEWLINE##', '<br>')
+
+        if to_mmd:
+            doc = convert_all_tables_to_mmd(doc)
+            doc = apply_markdown_hardcodes(doc)
+            doc = doc.replace("<sup>", "^").replace("</sup>", "^")
+            doc = doc.replace("<sub>", "~").replace("</sub>", "~")
+            doc = doc.replace(r"\ |", r"\|")
+
+            doc = re.sub(r'(?<=[^\s-])<br>', ' ', doc)
+            doc = doc.replace('', r'\|')
+            doc = doc.replace('##MD_NEWLINE##', '<br>')
+
+        if (r"\>/R\<" in doc and r"\>R\<" in doc and r"\>PAGE\<" in doc):
+            doc = doc.replace(r"\>/R\<", "").replace(r"\>R\<", "").replace(r"\>R\/R\<", "").replace(r"\>R\<", "").replace(r"\>R\\", "").replace(r"#### \>PAGE\<", "")
+            doc = re.sub(r"\n\\>PAGE\\< ?\r?\n", "\n", doc)
+            MARKERS = re.compile(
+                r'(?m)^[ \t]*(?:\*\*\\?>R\\?\*\*|\d+[ \t]+\\?>PAGE\\?<)[ \t]*(?:\r?\n|$)'
+            )
+            doc = MARKERS.sub('', doc)
+
+        doc = re.sub(r'##(ROWSPAN_\d+|COLSPAN_\d+)##', '', doc)
+        doc = doc.replace("<br>)", ")").replace("<br>]", "]").replace("<br>%", "%").replace("<br>)%", ")%")
+        doc = re.sub(r'<br>\.[1-9](?!\d)', '', doc, flags=re.I)
+        doc = re.sub(r'(?<=\| )—(?= +\|)', ' ', doc)
+        doc = re.sub(r'\$ (?=[0-9(])', '$', doc)
+        doc = doc.replace('##SINGLE_ASTERISK##', '').replace('##DOUBLE_ASTERISK##', '').replace('##TRIPLE_ASTERISK##', '')
+        doc = re.sub(r'(?m)^\| o (?=.)', r'| ##INDENT##o ', doc)
+        doc = doc.replace('\u2063', '').replace('##INDENT##', '    ')
+        doc = doc.replace("\n\n\n\n", "")
+        doc = doc.replace("| **%****%** |", "| **%** |")
+        doc = re.sub(r'(\*{3,})(\.\*\*|\)?%\*\*)', r'*\2', doc)
+        doc = doc.replace("| nan |", "| |").replace("| nan |", "| |")
+        # Search for the uuencoded payload only after the replacements above,
+        # so the match offsets used for slicing below are still valid.
+        m = re.search(r'^begin 644.*(?:\r?\n|$)', doc, flags=re.M)
+        doc = doc[:m.start()] if m else doc
+        doc = doc.replace("\n## Excel\n\n\n", "")
+        doc = doc.replace('##SPACE##', ' ').replace('##I_SPACE##', ' ')
+        doc = doc.replace("<br> |", " |")
+        doc = re.sub(r"\n *<br> *\n", "\n", doc)
+        doc = re.sub(r"(\d+)\s%( \|)", r"\1%\2", doc)
+        doc = doc.replace("| %** |", "| **%** |")
+        doc = re.sub(r"\*\*(\d+(?:\.\d+)?)\*\*\s*%?\s*\*\*(?=\s|$)", r"**\1%**", doc)
+        doc = re.sub(r"(^|\n)<br>(?=#+)", r"\1", doc)
+        doc = re.sub(r"<br>(?=#+)", r"\n\n", doc)
+        doc = doc.replace("<br>------", "------").replace("<br><br>", "<br>").replace("<br>**)**", "**)**")
+        doc = doc.replace("\n# # #\n", "\n\\# \\# \\#\n")
+        doc = doc.replace("\n<br>---\n\n|", "\n---\n\n|")
+        pattern = r'(?<=[^\s-])------(?=\r?\n)'
+        replacement = r'\n\n------'
+        doc = re.sub(pattern, replacement, doc)
+        doc = doc.replace("<br>% |", "% |").replace("*<br>*%* |", "%* |").replace("***<br>*%***", "%***").replace("*<br>
*%* |", "%* |") + + + + + + + + + + item_heading_pattern = re.compile( + r'(^\s*(?:\*\*)?\s*(?:item\s+)?\d+[A-Z]?\.)' + r'(?=[A-Z])', + re.IGNORECASE | re.MULTILINE + ) + doc = item_heading_pattern.sub(r'\1 ', doc) + pattern = r"\|\s\**(?: | )+\**\s\|" + + doc = doc.replace(") ** |", ")** |") + + doc = re.sub( + r"\*\*\(\s*(\$?[+-]?\d[\d,]*(?:\.\d+)?)\s*\*\*\s*(?:\s*)?\s*\*\*\)\s*\*\*", + r"**(\1)**", + doc, + flags=re.IGNORECASE, + ) + + doc = re.sub( + r"\*\*([+-]?\d[\d,]*(?:\.\d+)?)\*\*(\s*(?:)?\s*(%?)\s*)\*\*(?=\s|$)", + lambda m: f"**{m.group(1)}{'%' if not m.group(3) else m.group(2)}**", + doc, + flags=re.IGNORECASE, + ) + + doc = re.sub(pattern, "| |", doc) + doc = re.sub(pattern, "| |", doc) + + + + + doc = re.sub(r'(?m)^\*\*2\*\*\s+\*\*nd\b', r'**2nd', doc) + + doc = doc.replace("** ---\n\n|", "**\n\n---\n\n|") + doc = doc.replace("| $**$** | $%** |", "| **$** | **%** |").replace('| — |', '| |').replace('| — |', '| |').replace("| **$Change** |", "| **$ Change** |").replace("| **%Change** |", "| **% Change** |").replace("     months |", " months |") + pattern = r"(\n\n\*\*\d+)\. \*\*" + replacement = r"\1.**" + + doc = re.sub(pattern, replacement, doc) + doc = doc.replace("**• ** ", "**•** ") + + pattern = re.compile(r"^(begin 644 (.*)(\r?\n|$))", re.MULTILINE) + + doc = pattern.sub(conditional_delete, doc) + + doc = re.sub(r'\^\((\d+)\)((?: )+)\^', r'^(\1)^\2', doc) + + doc = doc.replace("**6** **.** **ACCOUNTS RECEIVABLE**", "**6.** **ACCOUNTS RECEIVABLE**").replace("**1** **2.** **Long-term debt**", "**12.** **Long-term debt**").replace("**1** **3.** **Employee future benefits**", "**13.** **Employee future benefits**") + + out = path.with_suffix(".md") + out.write_text(doc, encoding="utf-8") + parse_stats = _complete_parse_stats_for_output( + LAST_PARSE_STATS, + output_path=out, + final_markdown=doc, + to_mmd=to_mmd, + ) + try: + _write_parse_stats_outputs(parse_stats, out) + _print_parse_stats_summary(parse_stats) + except Exception as stats_exc: + print(f"[parse-stats warning] Could not write parse stats: {stats_exc}") + print(f"Successful! Output written to {out}") + except Exception as e: + logging.error( + f"FILE: {path.name}\n" + f"ERROR: {e}\n" + f"TRACEBACK:\n{traceback.format_exc()}" + ) + print(f"[ERROR] {path}: {e}. Details logged to {log_file_path.name}", file=sys.stderr) + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="A command-line tool to parse an SEC filing in HTML, HTM, or TXT format and convert it to Markdown.", + formatter_class=argparse.RawTextHelpFormatter + ) + parser.add_argument("path", nargs="?", help="Path to the SEC filing (e.g., your_file.html).") + parser.add_argument( + "--to_mmd", + action="store_true", + help="Convert all tables in the final output to MultiMarkdown format." + ) + parser.add_argument( + "--source-document-url", + help="Optional absolute source document URL used to resolve fragment links like #a_001." + ) + parser.add_argument( + "--mistral-key-status", + action="store_true", + help="Print the shared Mistral key rotation/usage monitor JSON and exit." + ) + parser.add_argument( + "--reset-mistral-key-status", + action="store_true", + help="Reset the shared Mistral key rotation/usage monitor JSON and exit." 
+ ) + + args = parser.parse_args() + + if args.reset_mistral_key_status: + print(json.dumps(reset_mistral_key_status(), indent=2, sort_keys=True)) + sys.exit(0) + + if args.mistral_key_status: + print(json.dumps(get_mistral_key_status_snapshot(), indent=2, sort_keys=True)) + sys.exit(0) + + if not args.path: + parser.error("the following arguments are required: path") + + file_path = pathlib.Path(args.path) + if not file_path.is_file() and file_path.parts and file_path.parts[0] == "sec_parser": + alt_path = pathlib.Path(*file_path.parts[1:]) + if alt_path.is_file(): + print(f"[info] Using '{alt_path}' instead of '{args.path}'.") + file_path = alt_path + if not file_path.is_file(): + print(f"Error: File not found at {args.path}", file=sys.stderr) + sys.exit(1) + + main_one(file_path, to_mmd=args.to_mmd, source_document_url=args.source_document_url)
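+# --- Illustrative usage (editor's addition; the input path is hypothetical) -
+# Programmatic invocation mirrors the CLI entry point above:
+#
+#     main_one(pathlib.Path("0001234567-24-000001.txt"), to_mmd=True)
+#
+# This writes the converted Markdown next to the input as
+# 0001234567-24-000001.md and prints a parse-stats summary.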