sefd-anonymous committed on
Commit
62787e2
·
verified ·
1 Parent(s): ba16513

Upload SEC parser release files

Browse files
.gitattributes CHANGED
@@ -58,3 +58,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
58
  # Video files - compressed
59
  *.mp4 filter=lfs diff=lfs merge=lfs -text
60
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
58
  # Video files - compressed
59
  *.mp4 filter=lfs diff=lfs merge=lfs -text
60
  *.webm filter=lfs diff=lfs merge=lfs -text
61
+ __pycache__/table_ocr_backends.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.8-slim

# Build toolchain (for source builds below) plus wkhtmltopdf and fetch tools.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential python3-dev pkg-config curl xz-utils ca-certificates \
    wkhtmltopdf \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# build exact libs: pin libxml2 2.11.9 and libxslt 1.1.39, built from source
# and installed under /opt/xml so the lxml source build below links against
# these exact releases instead of the distro packages.
RUN curl -LO https://download.gnome.org/sources/libxml2/2.11/libxml2-2.11.9.tar.xz \
    && tar -xf libxml2-2.11.9.tar.xz && cd libxml2-2.11.9 \
    && ./configure --prefix=/opt/xml && make -j && make install && cd .. \
    && rm -rf libxml2-2.11.9 libxml2-2.11.9.tar.xz \
    && curl -LO https://download.gnome.org/sources/libxslt/1.1/libxslt-1.1.39.tar.xz \
    && tar -xf libxslt-1.1.39.tar.xz && cd libxslt-1.1.39 \
    && PKG_CONFIG_PATH=/opt/xml/lib/pkgconfig ./configure --prefix=/opt/xml \
    && make -j && make install && cd .. \
    && rm -rf libxslt-1.1.39 libxslt-1.1.39.tar.xz

# Point the compiler, linker, pkg-config, and runtime loader at /opt/xml.
ENV CFLAGS="-I/opt/xml/include" \
    LDFLAGS="-L/opt/xml/lib" \
    PKG_CONFIG_PATH="/opt/xml/lib/pkgconfig" \
    LD_LIBRARY_PATH="/opt/xml/lib"

COPY requirements.txt .
# --no-binary lxml forces a source build so lxml compiles against the
# pinned /opt/xml libraries rather than using a bundled wheel.
RUN pip install -U pip setuptools wheel \
    && pip install --no-binary lxml -r requirements.txt

RUN playwright install --with-deps chromium

COPY . .
Dockerfile.linux ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
FROM python:3.12.12-bookworm
# Suppress interactive prompts during apt installs.
ENV DEBIAN_FRONTEND=noninteractive
WORKDIR /app
# wkhtmltopdf: HTML-to-PDF renderer used by the parser toolchain.
RUN apt-get update && apt-get install -y wkhtmltopdf && rm -rf /var/lib/apt/lists/*
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Install Chromium plus its system dependencies for Playwright rendering.
RUN playwright install chromium --with-deps
README.md ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ pretty_name: SEC Edgar Filings Datasets Parser
4
+ tags:
5
+ - sec
6
+ - edgar
7
+ - financial-documents
8
+ - document-parsing
9
+ - markdown
10
+ ---
11
+
12
+ # SEC Edgar Filings Datasets Parser
13
+
14
+ This repository contains the core parser used to convert SEC EDGAR filings into layout-faithful Markdown-style text for downstream dataset construction and evaluation.
15
+
16
+ ## Contents
17
+
18
+ - `sec_parser/sec_parser.py`: main parser implementation
19
+ - `sec_parser/special_chars.py`: special-character normalization tables
20
+ - `sec_parser/hardcodes.py`: filing cleanup hardcodes
21
+ - `sec_parser/config.py`: parser configuration
22
+ - `pdf_table_fastpath.py`, `table_ocr_backends.py`, `mistral_pdf_ocr_overlay.py`: PDF/table OCR helper code
23
+ - `test_sec_parser.py`: smoke/regression tests
24
+ - `Dockerfile`, `Dockerfile.linux`, `requirements.txt`: reproducible environment files
25
+
26
+ ## Quick Start
27
+
28
+ ```bash
29
+ pip install -r requirements.txt
30
+ python -m unittest test_sec_parser.py
31
+ python sec_parser/sec_parser.py path/to/filing.txt
32
+ ```
33
+
34
+ Some PDF/OCR paths require external OCR credentials and browser rendering dependencies.
__pycache__/mistral_pdf_ocr_overlay.cpython-311.pyc ADDED
Binary file (6.7 kB). View file
 
__pycache__/pdf_table_fastpath.cpython-311.pyc ADDED
Binary file (82.1 kB). View file
 
__pycache__/table_ocr_backends.cpython-311.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6279f3fb60ef8f8b51bf25284cdf4035f8ed3e17b86975f4762d3b1d1ab000b7
3
+ size 322041
__pycache__/test_sec_parser.cpython-311.pyc ADDED
Binary file (6.29 kB). View file
 
mistral_pdf_ocr_overlay.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ from __future__ import annotations
3
+
4
+ import argparse
5
+ import json
6
+ from pathlib import Path
7
+
8
+ import table_ocr_backends
9
+
10
+
11
def parse_args() -> argparse.Namespace:
    """Build and evaluate the command-line interface for this tool.

    --pdf and --page are required; every other option is optional and its
    default behavior is described in the option's help string.
    """
    cli = argparse.ArgumentParser(
        description=(
            "Run OCR on a PDF page, then overlay born-digital PDF cell bboxes "
            "and recovered bold/italic/underline formatting onto the returned table HTML."
        )
    )
    # Required inputs.
    cli.add_argument("--pdf", required=True, help="Path to the source PDF.")
    cli.add_argument("--page", required=True, type=int, help="1-based PDF page number.")
    # OCR configuration.
    cli.add_argument(
        "--model-id",
        default=None,
        help="Optional OCR model id override. Defaults to the configured/default PDF page OCR model.",
    )
    cli.add_argument(
        "--input-html",
        default=None,
        help="Optional path to an existing OCR HTML fragment. When provided, the script skips OCR and only applies the PDF-native overlay.",
    )
    cli.add_argument(
        "--style-overlay-mode",
        default="auto",
        choices=["none", "attrs_only", "formatting_only", "auto", "aggressive"],
        help=(
            "Formatting overlay behavior. "
            "`formatting_only` preserves OCR text and only injects semantic bold/italic/underline tags. "
            "`auto` safely swaps in native styled cell HTML when the text match is strong. "
            "`aggressive` prefers native styled cell HTML whenever a cell matches."
        ),
    )
    cli.add_argument("--page-render-zoom", type=float, default=None, help="Optional PDF render zoom before OCR.")
    # Output destinations.
    cli.add_argument("--output-html", default=None, help="Optional path to write the final annotated HTML.")
    cli.add_argument(
        "--output-raw-html",
        default=None,
        help=(
            "Optional path to write the original OCR HTML before PDF-native overlay. "
            "If omitted and --output-html is set, defaults to a sibling '*.raw.html' file."
        ),
    )
    cli.add_argument("--output-json", default=None, help="Optional path to write the full JSON payload.")
    return cli.parse_args()
53
+
54
+
55
def main() -> None:
    """CLI entry point: OCR (or reuse) a table HTML fragment and overlay PDF-native cells.

    Produces the final annotated HTML, optionally the raw pre-overlay HTML and
    the full JSON payload, then prints either the HTML (no outputs requested)
    or a JSON summary of what was written.
    """
    args = parse_args()
    if args.input_html:
        # Reuse an existing OCR HTML fragment and skip the OCR call entirely.
        input_html_path = Path(args.input_html).resolve()
        input_html = input_html_path.read_text(encoding="utf-8")
        payload = table_ocr_backends.overlay_pdf_page_html_with_native_cells(
            input_html,
            pdf_path=args.pdf,
            page_number=max(1, int(args.page)),
            effective_model_id=args.model_id or "existing-html+pdf-overlay",
            style_overlay_mode=args.style_overlay_mode,
        )
    else:
        # Full path: OCR the page, then apply the PDF-native cell overlay.
        payload = table_ocr_backends.transcribe_pdf_page_to_payload(
            args.pdf,
            page_number=max(1, int(args.page)),
            model_id=args.model_id,
            page_render_zoom=args.page_render_zoom,
            overlay_pdf_cells=True,
            style_overlay_mode=args.style_overlay_mode,
        )

    output_html = str(payload.get("html") or "")
    # raw_html falls back to the final HTML when the backend gave no raw copy.
    raw_html = str(payload.get("raw_html") or output_html)
    output_html_path = Path(args.output_html).resolve() if args.output_html else None
    output_raw_html_path = None
    if args.output_raw_html:
        output_raw_html_path = Path(args.output_raw_html).resolve()
    elif output_html_path is not None:
        # Default the raw-HTML path to a sibling "<stem>.raw.html" file.
        output_raw_html_path = output_html_path.with_name(f"{output_html_path.stem}.raw.html")

    if args.output_html:
        assert output_html_path is not None
        output_html_path.parent.mkdir(parents=True, exist_ok=True)
        output_html_path.write_text(output_html, encoding="utf-8")

    if output_raw_html_path is not None:
        output_raw_html_path.parent.mkdir(parents=True, exist_ok=True)
        output_raw_html_path.write_text(raw_html, encoding="utf-8")

    if args.output_json:
        output_json_path = Path(args.output_json).resolve()
        output_json_path.parent.mkdir(parents=True, exist_ok=True)
        output_json_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\n", encoding="utf-8")

    if not args.output_html and not args.output_json:
        # No file outputs requested: emit the annotated HTML itself and stop.
        print(output_html)
        return

    # Otherwise print a machine-readable summary of what was produced.
    summary = {
        "pdf": str(Path(args.pdf).resolve()),
        "page": int(args.page),
        "model_id": payload.get("effective_model_id"),
        "overlay_applied": bool(payload.get("overlay_applied")),
        "overlay_changed_html": output_html != raw_html,
        "style_overlay_mode": payload.get("style_overlay_mode"),
        "timings_ms": payload.get("timings_ms"),
        "output_html": str(output_html_path) if output_html_path is not None else None,
        "output_raw_html": str(output_raw_html_path) if output_raw_html_path is not None else None,
        "output_json": str(Path(args.output_json).resolve()) if args.output_json else None,
    }
    print(json.dumps(summary, indent=2, sort_keys=True))
117
+
118
+
119
+ if __name__ == "__main__":
120
+ main()
pdf_table_fastpath.py ADDED
@@ -0,0 +1,1197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ from __future__ import annotations
3
+
4
+ import argparse
5
+ import html
6
+ import json
7
+ import os
8
+ import time
9
+ from dataclasses import dataclass
10
+ from pathlib import Path
11
+ from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple
12
+
13
+ import fitz
14
+ import numpy as np
15
+ from PIL import Image, ImageDraw
16
+
17
+
18
+ MIN_NATIVE_SPAN_COUNT = 24
19
+
20
+
21
@dataclass
class StyledFragment:
    """A run of text on a PDF page together with its recovered style flags."""

    text: str  # stripped text content
    bbox: Tuple[float, float, float, float]  # (x0, y0, x1, y1) in page coordinates
    font: str  # PDF font name ("" for layout-only extraction, see extract_layout_fragments)
    flags: int  # raw span flags from the extractor
    size: float  # font size (or line height for layout fragments)
    bold: bool
    italic: bool
    underline: bool

    def to_payload(self) -> Dict[str, Any]:
        """JSON-serializable dict; coordinates and size rounded to 2 decimals."""
        return {
            "text": self.text,
            "bbox": [round(value, 2) for value in self.bbox],
            "font": self.font,
            "flags": int(self.flags),
            "size": round(float(self.size), 2),
            "bold": bool(self.bold),
            "italic": bool(self.italic),
            "underline": bool(self.underline),
        }
43
+
44
+
45
@dataclass
class RowGroup:
    """Fragments clustered into one visual table row."""

    cy: float  # running mean of member vertical centers (maintained by group_rows)
    items: List[StyledFragment]

    @property
    def bbox(self) -> Tuple[float, float, float, float]:
        """Union bounding box of all member fragments."""
        return union_bbox(item.bbox for item in self.items)

    def to_payload(self) -> Dict[str, Any]:
        """JSON-serializable dict; member texts joined with ' | '."""
        return {
            "bbox": [round(value, 2) for value in self.bbox],
            "text": " | ".join(item.text for item in self.items),
            "fragments": [item.to_payload() for item in self.items],
        }
60
+
61
+
62
@dataclass
class TableCandidate:
    """A scored candidate table region with its grouped rows."""

    rect: Tuple[float, float, float, float]  # candidate bbox in page coordinates
    source: str  # which detector produced it (e.g. "scored" in score_candidate)
    score: float  # higher is better; used for ranking in dedupe_candidates
    rows: List[RowGroup]

    def to_payload(self) -> Dict[str, Any]:
        """JSON-serializable dict including per-row payloads and counts."""
        return {
            "bbox": [round(value, 2) for value in self.rect],
            "source": self.source,
            "score": round(float(self.score), 2),
            "row_count": len(self.rows),
            "fragment_count": sum(len(row.items) for row in self.rows),
            "rows": [row.to_payload() for row in self.rows],
        }
78
+
79
+
80
@dataclass
class TableCellCandidate:
    """A reconstructed table cell with grid position, span, and styling."""

    row: int  # grid row index
    col: int  # grid column index
    rowspan: int
    colspan: int
    bbox: Tuple[float, float, float, float]  # cell bbox in page coordinates
    text: str  # plain cell text
    html: str  # cell content with inline style markup
    header: bool  # True when the cell belongs to a header row
    bold: bool
    italic: bool
    underline: bool

    def to_payload(self) -> Dict[str, Any]:
        """JSON-serializable dict with coordinates rounded to 2 decimals."""
        return {
            "row": int(self.row),
            "col": int(self.col),
            "rowspan": int(self.rowspan),
            "colspan": int(self.colspan),
            "bbox": [round(value, 2) for value in self.bbox],
            "text": self.text,
            "html": self.html,
            "header": bool(self.header),
            "bold": bool(self.bold),
            "italic": bool(self.italic),
            "underline": bool(self.underline),
        }
108
+
109
+
110
@dataclass
class RasterCandidate:
    """Score/diagnostic payload for a raster-detected table region.

    NOTE(review): the producer of these values is outside this chunk — the
    exact semantics of the density fields should be confirmed there.
    """

    rect: Tuple[float, float, float, float]  # candidate bbox
    score: float
    row_count: int
    avg_components_per_row: float
    text_density: float
    line_density: float

    def to_payload(self) -> Dict[str, Any]:
        """JSON-serializable dict with rounded numeric fields."""
        return {
            "bbox": [round(value, 2) for value in self.rect],
            "score": round(float(self.score), 2),
            "row_count": int(self.row_count),
            "avg_components_per_row": round(float(self.avg_components_per_row), 2),
            "text_density": round(float(self.text_density), 4),
            "line_density": round(float(self.line_density), 4),
        }
128
+
129
+
130
def union_bbox(boxes: Iterable[Sequence[float]]) -> Tuple[float, float, float, float]:
    """Smallest (x0, y0, x1, y1) box enclosing every box in *boxes*.

    An empty iterable yields the degenerate box (0.0, 0.0, 0.0, 0.0).
    """
    normalized = [tuple(float(coord) for coord in box) for box in boxes]
    if not normalized:
        return (0.0, 0.0, 0.0, 0.0)
    left = min(box[0] for box in normalized)
    top = min(box[1] for box in normalized)
    right = max(box[2] for box in normalized)
    bottom = max(box[3] for box in normalized)
    return (left, top, right, bottom)
140
+
141
+
142
def bbox_iou(box_a: Sequence[float], box_b: Sequence[float]) -> float:
    """Intersection-over-union of two axis-aligned (x0, y0, x1, y1) boxes.

    The union is floored at 1e-6 so degenerate boxes never divide by zero.
    """

    def _area(box: Sequence[float]) -> float:
        return max(0.0, float(box[2]) - float(box[0])) * max(0.0, float(box[3]) - float(box[1]))

    overlap_w = max(0.0, min(float(box_a[2]), float(box_b[2])) - max(float(box_a[0]), float(box_b[0])))
    overlap_h = max(0.0, min(float(box_a[3]), float(box_b[3])) - max(float(box_a[1]), float(box_b[1])))
    intersection = overlap_w * overlap_h
    denominator = max(1e-6, _area(box_a) + _area(box_b) - intersection)
    return intersection / denominator
154
+
155
+
156
def width(box: Sequence[float]) -> float:
    """Horizontal extent of *box*, clamped to be non-negative."""
    return max(float(box[2]) - float(box[0]), 0.0)
158
+
159
+
160
def height(box: Sequence[float]) -> float:
    """Vertical extent of *box*, clamped to be non-negative."""
    return max(float(box[3]) - float(box[1]), 0.0)
162
+
163
+
164
def center_y(box: Sequence[float]) -> float:
    """Vertical midpoint of *box*."""
    top, bottom = float(box[1]), float(box[3])
    return (top + bottom) / 2.0
166
+
167
+
168
def center_x(box: Sequence[float]) -> float:
    """Horizontal midpoint of *box*."""
    left, right = float(box[0]), float(box[2])
    return (left + right) / 2.0
170
+
171
+
172
def is_numeric_like(text: str) -> bool:
    """True when *text* contains at least one digit after stripping whitespace."""
    stripped = str(text or "").strip()
    return bool(stripped) and any(char.isdigit() for char in stripped)
177
+
178
+
179
def is_bold(font_name: str, flags: int) -> bool:
    """True when the font name contains "bold" or span flag bit 16 is set."""
    name = str(font_name or "").lower()
    if "bold" in name:
        return True
    return bool(int(flags or 0) & 16)
182
+
183
+
184
def is_italic(font_name: str, flags: int) -> bool:
    """True when the font name contains "italic"/"oblique" or span flag bit 2 is set."""
    name = str(font_name or "").lower()
    if "italic" in name or "oblique" in name:
        return True
    return bool(int(flags or 0) & 2)
187
+
188
+
189
def extract_horizontal_line_boxes(page: fitz.Page) -> List[Tuple[float, float, float, float]]:
    """Collect bboxes of thin drawings (height <= 2.5pt, width > 0) on *page*.

    These are the candidate horizontal rules used for underline detection.
    """
    horizontal_rules: List[Tuple[float, float, float, float]] = []
    for drawing in page.get_drawings():
        rect = drawing.get("rect")
        if rect is None:
            continue
        if rect.width > 0 and rect.height <= 2.5:
            horizontal_rules.append((rect.x0, rect.y0, rect.x1, rect.y1))
    return horizontal_rules
199
+
200
+
201
def span_has_underline(
    bbox: Sequence[float],
    *,
    line_boxes: Sequence[Sequence[float]],
) -> bool:
    """Heuristically decide whether the text span at *bbox* is underlined.

    A span counts as underlined when some box in *line_boxes* covers at least
    60% of the span's width and its top or bottom edge lies within 2.5pt of
    the span's bottom edge.
    """
    span_left, _span_top, span_right, span_bottom = (float(value) for value in bbox)
    span_width = max(1.0, span_right - span_left)
    for rule in line_boxes:
        rule_left, rule_top, rule_right, rule_bottom = (float(value) for value in rule)
        shared = min(span_right, rule_right) - max(span_left, rule_left)
        if shared < 0.60 * span_width:
            continue
        if abs(rule_top - span_bottom) <= 2.5 or abs(rule_bottom - span_bottom) <= 2.5:
            return True
    return False
216
+
217
+
218
def extract_styled_fragments(page: fitz.Page) -> List[StyledFragment]:
    """Extract non-empty text spans from *page* with bold/italic/underline flags.

    Uses the "rawdict" text extraction; underline is inferred by matching span
    bottoms against thin horizontal drawings found on the page.
    """
    rawdict = page.get_text("rawdict")
    line_boxes = extract_horizontal_line_boxes(page)
    fragments: List[StyledFragment] = []
    for block in rawdict.get("blocks", []):
        for line in block.get("lines", []):
            for span in line.get("spans", []):
                # Re-assemble the span text from individual chars; skip
                # whitespace-only spans.
                text = "".join(char.get("c", "") for char in span.get("chars", [])).strip()
                if not text:
                    continue
                bbox = tuple(float(value) for value in span.get("bbox", (0, 0, 0, 0)))
                font = str(span.get("font") or "")
                flags = int(span.get("flags") or 0)
                fragments.append(
                    StyledFragment(
                        text=text,
                        bbox=bbox,
                        font=font,
                        flags=flags,
                        size=float(span.get("size") or 0.0),
                        bold=is_bold(font, flags),
                        italic=is_italic(font, flags),
                        underline=span_has_underline(bbox, line_boxes=line_boxes),
                    )
                )
    return fragments
244
+
245
+
246
def extract_layout_fragments(page: fitz.Page) -> List[StyledFragment]:
    """Extract style-less fragments from *page* by merging nearby words.

    Words are grouped per (block, line); within a line, consecutive words are
    merged into one fragment unless the horizontal gap exceeds a threshold
    relative to the line height.  Layout fragments carry no font/style info
    (font="", flags=0, all style flags False); size holds the line height.
    """
    words = page.get_text("words", sort=True)
    grouped_words: Dict[Tuple[int, int], List[Tuple[float, float, float, float, str]]] = {}
    for word in words:
        if len(word) < 8:
            continue
        x0, y0, x1, y1, text, block_no, line_no, _word_no = word[:8]
        normalized_text = str(text or "").strip()
        if not normalized_text:
            continue
        grouped_words.setdefault((int(block_no), int(line_no)), []).append(
            (float(x0), float(y0), float(x1), float(y1), normalized_text)
        )

    fragments: List[StyledFragment] = []

    def _emit(texts: List[str], boxes: List[Tuple[float, float, float, float]], line_height: float) -> None:
        # One merged fragment per visual run; layout mode has no style info.
        fragments.append(
            StyledFragment(
                text=" ".join(texts),
                bbox=union_bbox(boxes),
                font="",
                flags=0,
                size=max(line_height, 0.0),
                bold=False,
                italic=False,
                underline=False,
            )
        )

    for _line_key, line_words in sorted(grouped_words.items(), key=lambda item: (item[0][0], item[0][1])):
        current_texts: List[str] = []
        current_boxes: List[Tuple[float, float, float, float]] = []
        previous_right: Optional[float] = None
        current_height = 0.0
        for x0, y0, x1, y1, text in sorted(line_words, key=lambda item: item[0]):
            box = (x0, y0, x1, y1)
            box_height = height(box)
            # Gap to the previous word; infinite for the first word of a line.
            gap = float("inf") if previous_right is None else max(0.0, x0 - previous_right)
            # Words merge unless the gap exceeds max(8pt, 45% of line height).
            merge_gap = max(8.0, current_height * 0.45, box_height * 0.45)
            if current_texts and gap > merge_gap:
                _emit(current_texts, current_boxes, current_height)
                current_texts = []
                current_boxes = []
                current_height = 0.0
            current_texts.append(text)
            current_boxes.append(box)
            current_height = max(current_height, box_height)
            previous_right = x1
        if current_texts:
            # Flush the trailing run of the line.
            _emit(current_texts, current_boxes, current_height)
    return fragments
307
+
308
+
309
def group_rows(items: Sequence[StyledFragment], *, y_tolerance: float = 3.5) -> List[RowGroup]:
    """Cluster fragments into visual rows by vertical-center proximity.

    Fragments are scanned in (center-y, left-x) order; each joins the last
    row when its center is within *y_tolerance* of that row's running mean
    center, otherwise it starts a new row.
    """
    ordered = sorted(items, key=lambda fragment: (center_y(fragment.bbox), fragment.bbox[0]))
    rows: List[RowGroup] = []
    for fragment in ordered:
        fragment_cy = center_y(fragment.bbox)
        if not rows or abs(rows[-1].cy - fragment_cy) > y_tolerance:
            rows.append(RowGroup(cy=fragment_cy, items=[fragment]))
            continue
        last_row = rows[-1]
        last_row.items.append(fragment)
        # Re-center the row on the mean of all member centers.
        last_row.cy = float(np.mean([center_y(member.bbox) for member in last_row.items]))
    return rows
319
+
320
+
321
def is_headerish_row(row: RowGroup, *, page_width: float) -> bool:
    """Heuristic: does *row* look like a table header/caption line?

    True when the row is roughly centered (inside 25%-85% of the page width),
    or its joined text is short (<= 80 chars), or its first fragment is italic.
    """
    left, _top, right, _bottom = row.bbox
    roughly_centered = left > page_width * 0.25 and right < page_width * 0.85
    joined_text = " ".join(fragment.text for fragment in row.items)
    return roughly_centered or len(joined_text) <= 80 or row.items[0].italic
327
+
328
+
329
def _fragment_style_weight(fragment: StyledFragment) -> float:
    """Voting weight for style statistics.

    Counts non-whitespace characters; falls back to the bbox width (min 1.0)
    for fragments with no visible text.
    """
    compact = "".join(char for char in str(fragment.text or "") if not char.isspace())
    if compact:
        return float(len(compact))
    return max(1.0, width(fragment.bbox))
334
+
335
+
336
def _fragment_style_fraction(
    fragments: Sequence[StyledFragment],
    *,
    attr_name: str,
) -> float:
    """Weighted share of fragments whose boolean attribute *attr_name* is set.

    Returns 0.0 when the total weight is zero (e.g. no fragments at all).
    """
    total_weight = 0.0
    styled_weight = 0.0
    for fragment in fragments:
        fragment_weight = _fragment_style_weight(fragment)
        total_weight += fragment_weight
        if getattr(fragment, attr_name, False):
            styled_weight += fragment_weight
    return styled_weight / total_weight if total_weight > 0 else 0.0
351
+
352
+
353
def _native_cell_bold_min_fraction() -> float:
    """Bold-fraction threshold for native cells.

    Read from the PDF_NATIVE_CELL_BOLD_MIN_FRACTION environment variable,
    clamped into [0, 1]; 0.60 is both the default and the fallback for
    unparsable values.
    """
    configured = str(os.getenv("PDF_NATIVE_CELL_BOLD_MIN_FRACTION", "0.60")).strip()
    try:
        value = float(configured)
    except ValueError:
        return 0.60
    return min(1.0, max(0.0, value))
359
+
360
+
361
def drawing_based_candidates(page: fitz.Page) -> List[Tuple[float, float, float, float]]:
    """Propose table regions from vector drawings (fills and rules) on *page*.

    Wide fill rectangles (row shading) and thin stroked lines (horizontal
    rules) are collected, then vertically-adjacent boxes with enough
    horizontal overlap are merged into single candidate regions.
    """
    candidate_boxes: List[Tuple[float, float, float, float]] = []
    page_width = float(page.rect.width)
    for drawing in page.get_drawings():
        rect = drawing.get("rect")
        if rect is None:
            continue
        candidate = (float(rect.x0), float(rect.y0), float(rect.x1), float(rect.y1))
        # Ignore narrow drawings: table furniture spans >= 35% of the page.
        if width(candidate) < (page_width * 0.35):
            continue
        draw_type = str(drawing.get("type") or "")
        # Fill drawings ("f"/"fs") up to 30pt tall look like shaded rows.
        if draw_type in {"f", "fs"} and height(candidate) <= 30.0:
            candidate_boxes.append(candidate)
            continue
        # Stroke drawings ("s"/"fs") up to 2pt tall look like horizontal rules.
        if draw_type in {"s", "fs"} and height(candidate) <= 2.0:
            candidate_boxes.append(candidate)
    candidate_boxes.sort(key=lambda box: (box[1], box[0]))

    # Merge boxes that sit within 18pt vertically of the previous merged box
    # and overlap it horizontally by more than 20% of the page width.
    merged: List[Tuple[float, float, float, float]] = []
    for candidate in candidate_boxes:
        if (
            merged
            and candidate[1] - merged[-1][3] <= 18.0
            and min(candidate[2], merged[-1][2]) - max(candidate[0], merged[-1][0]) > (page_width * 0.20)
        ):
            merged[-1] = (
                min(merged[-1][0], candidate[0]),
                min(merged[-1][1], candidate[1]),
                max(merged[-1][2], candidate[2]),
                max(merged[-1][3], candidate[3]),
            )
            continue
        merged.append(candidate)
    return merged
395
+
396
+
397
def row_is_tabular_core(row: RowGroup) -> bool:
    """Strong signal that *row* belongs to a table body.

    True for rows with >= 4 fragments, >= 3 fragments with at least one
    numeric-looking fragment, or >= 2 fragments with at least two numeric
    fragments.
    """
    cell_count = len(row.items)
    numeric_cells = sum(is_numeric_like(fragment.text) for fragment in row.items)
    if cell_count >= 4:
        return True
    if cell_count >= 3 and numeric_cells >= 1:
        return True
    return cell_count >= 2 and numeric_cells >= 2
406
+
407
+
408
def row_is_tabular_support(row: RowGroup) -> bool:
    """Weaker variant of row_is_tabular_core used to extend a detected run.

    Additionally accepts rows with >= 2 fragments and at least one numeric one.
    """
    if row_is_tabular_core(row):
        return True
    numeric_cells = sum(is_numeric_like(fragment.text) for fragment in row.items)
    return len(row.items) >= 2 and numeric_cells >= 1
415
+
416
+
417
def alignment_based_candidates(
    rows: Sequence[RowGroup],
    *,
    page_width: float,
) -> List[Tuple[float, float, float, float]]:
    """Propose table regions from runs of consecutive tabular-looking rows.

    Scans *rows* top-to-bottom for a "core" tabular row, extends the run
    downward while rows stay supportive and close (<= 22pt gap), then expands
    it upward over up to 3 header-ish rows and downward over short footnote
    rows starting with "(".  Runs wide enough (>= 35% of the page width)
    become candidate bboxes.
    """
    candidates: List[Tuple[float, float, float, float]] = []
    index = 0
    while index < len(rows):
        row = rows[index]
        if not row_is_tabular_core(row):
            index += 1
            continue
        start = index
        end = index + 1
        # Grow the run while consecutive rows support a table reading.
        while end < len(rows):
            row_gap = rows[end].bbox[1] - rows[end - 1].bbox[3]
            if row_gap > 22.0 or not row_is_tabular_support(rows[end]):
                break
            end += 1
        run = list(rows[start:end])
        core_count = sum(row_is_tabular_core(candidate_row) for candidate_row in run)
        avg_items = float(np.mean([len(candidate_row.items) for candidate_row in run])) if run else 0.0
        # Accept only runs with enough strong rows and enough cells per row.
        if core_count >= 3 and avg_items >= 2.8:
            # Pull in up to 3 header-ish rows directly above the run.
            expanded_start = start
            for _ in range(3):
                if expanded_start <= 0:
                    break
                previous_row = rows[expanded_start - 1]
                gap = rows[expanded_start].bbox[1] - previous_row.bbox[3]
                if gap > 18.0 or not is_headerish_row(previous_row, page_width=page_width):
                    break
                expanded_start -= 1
            # Pull in trailing short footnote rows that start with "(".
            expanded_end = end
            while expanded_end < len(rows):
                next_row = rows[expanded_end]
                if rows[expanded_end].bbox[1] - rows[expanded_end - 1].bbox[3] > 18.0:
                    break
                if len(next_row.items) > 2:
                    break
                next_text = " ".join(item.text for item in next_row.items).strip()
                if not next_text.startswith("("):
                    break
                expanded_end += 1
            run_rows = rows[expanded_start:expanded_end]
            candidate_box = union_bbox(row_group.bbox for row_group in run_rows)
            if width(candidate_box) >= (page_width * 0.35):
                candidates.append(candidate_box)
        # Resume after the run (always makes progress even for 1-row runs).
        index = max(end, index + 1)
    return candidates
466
+
467
+
468
def filter_fragments_in_rect(
    rect: Sequence[float],
    *,
    fragments: Sequence[StyledFragment],
) -> List[StyledFragment]:
    """Return the fragments whose bbox center lies inside *rect* (inclusive)."""
    left, top, right, bottom = (float(value) for value in rect)

    def _center_inside(box: Sequence[float]) -> bool:
        mid_x = (box[0] + box[2]) / 2.0
        mid_y = (box[1] + box[3]) / 2.0
        return left <= mid_x <= right and top <= mid_y <= bottom

    return [fragment for fragment in fragments if _center_inside(fragment.bbox)]
482
+
483
+
484
def score_candidate(
    rect: Sequence[float],
    *,
    fragments: Sequence[StyledFragment],
) -> Optional[TableCandidate]:
    """Score a candidate table rect by its row/numeric/bold density.

    Returns None when the rect holds fewer than 6 fragments, or fewer than
    3 rows with at least two fragments each.
    """
    contained = filter_fragments_in_rect(rect, fragments=fragments)
    if len(contained) < 6:
        return None
    grouped = group_rows(contained)
    multi_item_rows = [candidate_row for candidate_row in grouped if len(candidate_row.items) >= 2]
    if len(multi_item_rows) < 3:
        return None
    numeric_total = sum(is_numeric_like(fragment.text) for fragment in contained)
    bold_total = sum(bool(fragment.bold) for fragment in contained)
    # Rows dominate the score; numeric/bold evidence is capped so one dense
    # column cannot swamp the row-count signal.
    candidate_score = (
        len(multi_item_rows) * 5.0
        + min(20.0, float(numeric_total))
        + min(10.0, float(bold_total))
    )
    return TableCandidate(
        rect=tuple(float(value) for value in rect),
        source="scored",
        score=candidate_score,
        rows=grouped,
    )
505
+
506
+
507
def dedupe_candidates(candidates: Sequence[TableCandidate]) -> List[TableCandidate]:
    """Drop near-duplicate candidates (IoU >= 0.85 with a kept one).

    Candidates are considered best-first: higher score, then larger area.
    """
    ranked = sorted(
        candidates,
        key=lambda item: (item.score, width(item.rect) * height(item.rect)),
        reverse=True,
    )
    unique: List[TableCandidate] = []
    for candidate in ranked:
        if all(bbox_iou(candidate.rect, kept.rect) < 0.85 for kept in unique):
            unique.append(candidate)
    return unique
514
+
515
+
516
def render_fragment_html(fragment: StyledFragment) -> str:
    """HTML-escape a fragment's text and wrap it per its style flags.

    Nesting order (inner to outer): <strong>, <em>, <u>.  Quotes are left
    unescaped (quote=False).
    """
    markup = html.escape(fragment.text, quote=False)
    for enabled, tag in ((fragment.bold, "strong"), (fragment.italic, "em"), (fragment.underline, "u")):
        if enabled:
            markup = f"<{tag}>{markup}</{tag}>"
    return markup
525
+
526
+
527
def column_anchor_x(fragment: StyledFragment) -> float:
    """X coordinate used for column clustering.

    Numeric-looking fragments anchor on their right edge (right-aligned
    numerals share a right edge); all others anchor on their left edge.
    """
    edge_index = 2 if is_numeric_like(fragment.text) else 0
    return float(fragment.bbox[edge_index])
531
+
532
+
533
def infer_column_centers(
    rows: Sequence[RowGroup],
    *,
    table_rect: Sequence[float],
) -> List[float]:
    """Cluster fragment anchor x-positions into column-center estimates.

    Each fragment of a multi-item row votes with its anchor x (right edge for
    numeric text, left edge otherwise); anchors within a width-relative
    tolerance merge into a running-mean cluster.  Clusters with >= 2 votes are
    returned sorted left-to-right; otherwise the widest row's anchors are used
    as a fallback, and [] when there are no multi-item rows at all.
    """
    table_width = width(table_rect)
    tolerance = max(14.0, table_width * 0.03)
    centers: List[float] = []
    counts: List[int] = []
    dense_rows = [row for row in rows if len(row.items) >= 2]
    for row in dense_rows:
        for item in sorted(row.items, key=lambda fragment: fragment.bbox[0]):
            item_center = column_anchor_x(item)
            matched_index: Optional[int] = None
            for index, existing_center in enumerate(centers):
                if abs(existing_center - item_center) <= tolerance:
                    matched_index = index
                    break
            if matched_index is None:
                centers.append(item_center)
                counts.append(1)
                continue
            # Fold the new anchor into the cluster's running mean.
            counts[matched_index] += 1
            centers[matched_index] = (
                (centers[matched_index] * float(counts[matched_index] - 1)) + item_center
            ) / float(counts[matched_index])
    ranked_pairs = sorted(
        zip(centers, counts),
        key=lambda pair: pair[0],
    )
    # Keep only clusters supported by at least two votes.
    filtered = [center for center, count in ranked_pairs if count >= 2]
    if filtered:
        return filtered
    if dense_rows:
        # Fallback: anchors from the row with the most fragments.
        widest_row = max(dense_rows, key=lambda row: len(row.items))
        return [column_anchor_x(item) for item in sorted(widest_row.items, key=lambda fragment: fragment.bbox[0])]
    return []
570
+
571
+
572
def build_column_boundaries(
    column_centers: Sequence[float],
    *,
    table_rect: Sequence[float],
) -> List[float]:
    """Convert column centers into column-edge x positions spanning the table.

    Adjacent centers are split at their midpoint; the outermost boundaries
    are the table's left and right edges.  With no centers, only the table
    edges are returned.
    """
    left_edge = float(table_rect[0])
    right_edge = float(table_rect[2])
    if not column_centers:
        return [left_edge, right_edge]
    ordered = sorted(float(center) for center in column_centers)
    midpoints = [(a + b) / 2.0 for a, b in zip(ordered, ordered[1:])]
    return [left_edge, *midpoints, right_edge]
585
+
586
+
587
def build_row_boundaries(
    rows: Sequence[RowGroup],
    *,
    table_rect: Sequence[float],
) -> List[float]:
    """Row-edge y positions: table top, midpoints between consecutive row
    bboxes, table bottom.  With no rows, only the table edges are returned."""
    top_edge = float(table_rect[1])
    bottom_edge = float(table_rect[3])
    if not rows:
        return [top_edge, bottom_edge]
    edges = [top_edge]
    for upper, lower in zip(rows, rows[1:]):
        edges.append((float(upper.bbox[3]) + float(lower.bbox[1])) / 2.0)
    edges.append(bottom_edge)
    return edges
599
+
600
+
601
def infer_header_row_count(rows: Sequence[RowGroup]) -> int:
    """Count how many of the first (at most 3) rows look like header rows.

    The first counted row qualifies when it has no numeric fragments or at
    most 2 fragments; later rows qualify when digit-free with >= 2 fragments,
    or mostly bold with <= 2 numeric fragments.  Counting stops at the first
    row that matches none of these.
    """
    header_row_count = 0
    for row in rows[:3]:
        numeric_count = sum(is_numeric_like(item.text) for item in row.items)
        bold_count = sum(bool(item.bold) for item in row.items)
        # Looser test for the very first row: captions are often short.
        if header_row_count == 0 and (numeric_count == 0 or len(row.items) <= 2):
            header_row_count += 1
            continue
        if numeric_count == 0 and len(row.items) >= 2:
            header_row_count += 1
            continue
        # "Mostly bold": all but at most one fragment bold, few numerics.
        if bold_count >= max(1, len(row.items) - 1) and numeric_count <= 2:
            header_row_count += 1
            continue
        break
    return header_row_count
617
+
618
+
619
def assign_fragment_to_columns(
    fragment: StyledFragment,
    *,
    column_centers: Sequence[float],
    boundaries: Sequence[float],
    allow_spans: bool,
) -> Tuple[int, int]:
    """Map a text fragment to an inclusive (start, end) column index pair.

    A column counts as overlapped when the fragment covers at least 4pt or
    12% of its own width inside that column band. When spanning is disallowed
    (or nothing overlaps) the fragment snaps to the nearest column center.
    """
    if len(boundaries) < 2:
        return 0, 0
    frag_left, _frag_top, frag_right, _frag_bottom = fragment.bbox
    required_overlap = max(4.0, max(1.0, frag_right - frag_left) * 0.12)

    hits: List[int] = []
    for column_index in range(len(boundaries) - 1):
        band_left = float(boundaries[column_index])
        band_right = float(boundaries[column_index + 1])
        covered = max(0.0, min(frag_right, band_right) - max(frag_left, band_left))
        if covered >= required_overlap:
            hits.append(column_index)

    def nearest_column(indices) -> int:
        anchor = column_anchor_x(fragment)
        return min(indices, key=lambda i: abs(anchor - float(column_centers[i])))

    if hits:
        if allow_spans:
            return hits[0], hits[-1]
        snapped = nearest_column(hits)
        return snapped, snapped
    if not column_centers:
        return 0, 0
    snapped = nearest_column(range(len(column_centers)))
    return snapped, snapped
652
+
653
+
654
def infer_table_candidate_cells(candidate: TableCandidate) -> List[TableCellCandidate]:
    """Turn a scored table candidate into a flat list of cell candidates.

    Fragments in each row are assigned column ranges; fragments that collide
    on already-claimed columns fold into the previous cell (joined with
    ``<br>``). Cell geometry comes from the inferred row/column boundaries;
    header status from infer_header_row_count; bold from the configured
    minimum fraction of bold fragments.
    """
    ordered_rows = [
        RowGroup(cy=row.cy, items=sorted(row.items, key=lambda fragment: fragment.bbox[0]))
        for row in candidate.rows
    ]
    if not ordered_rows:
        return []
    column_centers = infer_column_centers(ordered_rows, table_rect=candidate.rect)
    if not column_centers:
        return []
    column_boundaries = build_column_boundaries(column_centers, table_rect=candidate.rect)
    row_boundaries = build_row_boundaries(ordered_rows, table_rect=candidate.rect)
    header_rows = infer_header_row_count(ordered_rows)

    cells: List[TableCellCandidate] = []
    for row_index, row in enumerate(ordered_rows):
        merged: List[Dict[str, Any]] = []
        for fragment in row.items:
            start_column, end_column = assign_fragment_to_columns(
                fragment,
                column_centers=column_centers,
                boundaries=column_boundaries,
                # A lone fragment in a row may legitimately span columns.
                allow_spans=(len(row.items) == 1),
            )
            fragment_html = render_fragment_html(fragment)
            if merged and start_column <= int(merged[-1]["end_column"]):
                # Collision with the previous cell's columns: merge into it.
                previous = merged[-1]
                previous["end_column"] = max(int(previous["end_column"]), end_column)
                previous["html"] += "<br>" + fragment_html
                previous["texts"].append(fragment.text)
                previous["fragments"].append(fragment)
            else:
                merged.append(
                    {
                        "start_column": start_column,
                        "end_column": end_column,
                        "html": fragment_html,
                        "texts": [fragment.text],
                        "fragments": [fragment],
                    }
                )

        column_cursor = 0
        for cell in merged:
            # Never let a cell start before the running cursor (no overlaps).
            start_column = max(column_cursor, int(cell["start_column"]))
            end_column = max(start_column, int(cell["end_column"]))
            column_cursor = end_column + 1
            cell_fragments = list(cell["fragments"])
            bold_fraction = _fragment_style_fraction(cell_fragments, attr_name="bold")
            cells.append(
                TableCellCandidate(
                    row=row_index,
                    col=start_column,
                    rowspan=1,
                    colspan=max(1, end_column - start_column + 1),
                    bbox=(
                        float(column_boundaries[start_column]),
                        float(row_boundaries[row_index]),
                        float(column_boundaries[end_column + 1]),
                        float(row_boundaries[row_index + 1]),
                    ),
                    text="\n".join(str(piece) for piece in cell["texts"] if str(piece).strip()),
                    html=str(cell["html"]),
                    header=(row_index < header_rows),
                    bold=(bold_fraction >= _native_cell_bold_min_fraction()),
                    italic=any(bool(fragment.italic) for fragment in cell_fragments),
                    underline=any(bool(fragment.underline) for fragment in cell_fragments),
                )
            )
    return cells
721
+
722
+
723
def render_table_candidate_html(candidate: TableCandidate) -> str:
    """Render a table candidate's inferred cells as a minimal HTML table.

    Returns an empty string when no cells can be inferred. Gaps in a row are
    padded with empty cells whose tag matches the row (``th`` if the row
    contains any header cell, else ``td``).
    """
    cells = infer_table_candidate_cells(candidate)
    if not cells:
        return ""
    total_rows = max(int(cell.row + cell.rowspan) for cell in cells)
    total_columns = max(int(cell.col + cell.colspan) for cell in cells)
    by_row: Dict[int, List[TableCellCandidate]] = {}
    for cell in cells:
        by_row.setdefault(int(cell.row), []).append(cell)

    pieces: List[str] = ["<table>"]
    for row_index in range(total_rows):
        ordered = sorted(by_row.get(row_index, []), key=lambda cell: (cell.col, cell.colspan))
        filler_tag = "th" if any(bool(cell.header) for cell in ordered) else "td"
        pieces.append("<tr>")
        cursor = 0
        for cell in ordered:
            start_column = max(cursor, int(cell.col))
            end_column = max(start_column, int(cell.col + cell.colspan - 1))
            # Pad any gap before this cell with empty filler cells.
            while cursor < start_column:
                pieces.append(f"<{filler_tag}></{filler_tag}>")
                cursor += 1
            span = max(1, end_column - start_column + 1)
            span_attr = f' colspan="{span}"' if span > 1 else ""
            tag = "th" if cell.header else "td"
            pieces.append(f"<{tag}{span_attr}>{cell.html}</{tag}>")
            cursor = end_column + 1
        # Pad out the row to the full column count.
        while cursor < total_columns:
            pieces.append(f"<{filler_tag}></{filler_tag}>")
            cursor += 1
        pieces.append("</tr>")
    pieces.append("</table>")
    return "".join(pieces)
756
+
757
+
758
def detect_native_tables(page: fitz.Page) -> Dict[str, Any]:
    """Run the native (text-span based) table detector for one PDF page.

    Alignment candidates act as a cheap gate: when none exist the function
    returns early without extracting styled spans or vector drawings. All
    stage durations are recorded in the returned ``timings_ms`` dict.
    """
    timings: Dict[str, float] = {}
    overall_start = time.perf_counter()

    stage_start = time.perf_counter()
    layout_fragments = extract_layout_fragments(page)
    timings["layout_extraction_ms"] = (time.perf_counter() - stage_start) * 1000.0

    page_width = float(page.rect.width)
    grouped_rows = group_rows(layout_fragments)

    stage_start = time.perf_counter()
    alignment_rects = alignment_based_candidates(grouped_rows, page_width=page_width)
    timings["alignment_candidate_ms"] = (time.perf_counter() - stage_start) * 1000.0
    if not alignment_rects:
        # No alignment evidence at all: skip the more expensive stages.
        timings["total_detection_ms"] = (time.perf_counter() - overall_start) * 1000.0
        return {
            "mode": "pdf_native",
            "fragments": layout_fragments,
            "tables": [],
            "timings_ms": timings,
            "row_count": len(grouped_rows),
        }

    stage_start = time.perf_counter()
    fragments = extract_styled_fragments(page)
    timings["span_extraction_ms"] = (time.perf_counter() - stage_start) * 1000.0

    stage_start = time.perf_counter()
    drawing_rects = drawing_based_candidates(page)
    timings["drawing_candidate_ms"] = (time.perf_counter() - stage_start) * 1000.0

    scored: List[TableCandidate] = []
    for rect in [*drawing_rects, *alignment_rects]:
        candidate = score_candidate(rect, fragments=fragments)
        if candidate is None:
            continue
        candidate.source = "drawing" if rect in drawing_rects else "alignment"
        scored.append(candidate)

    deduped = dedupe_candidates(scored)
    timings["total_detection_ms"] = (time.perf_counter() - overall_start) * 1000.0
    return {
        "mode": "pdf_native",
        "fragments": fragments,
        "tables": deduped,
        "timings_ms": timings,
        "row_count": len(grouped_rows),
    }
807
+
808
+
809
def render_page_image(page: fitz.Page, *, zoom: float = 2.0) -> Image.Image:
    """Rasterize a PDF page to an RGB PIL image at the given zoom factor."""
    pixmap = page.get_pixmap(matrix=fitz.Matrix(zoom, zoom), alpha=False)
    return Image.frombytes("RGB", [pixmap.width, pixmap.height], pixmap.samples)
812
+
813
+
814
def group_component_boxes_into_rows(
    component_boxes: Sequence[Sequence[int]],
    *,
    y_tolerance: float,
) -> List[List[Tuple[int, int, int, int]]]:
    """Cluster connected-component boxes into text rows by vertical center.

    Boxes are visited top-to-bottom (then left-to-right). A box joins the
    current row when its center lies within ``y_tolerance`` of the row's
    running mean center; otherwise it starts a new row.
    """

    def vertical_center(box: Tuple[int, int, int, int]) -> float:
        return (box[1] + box[3]) / 2.0

    normalized = sorted(
        ((int(b[0]), int(b[1]), int(b[2]), int(b[3])) for b in component_boxes),
        key=lambda box: (vertical_center(box), box[0]),
    )
    rows: List[List[Tuple[int, int, int, int]]] = []
    running_centers: List[float] = []
    for box in normalized:
        center = vertical_center(box)
        if rows and abs(running_centers[-1] - center) <= y_tolerance:
            rows[-1].append(box)
            # Track the mean center of everything in the row so far.
            running_centers[-1] = float(np.mean([vertical_center(item) for item in rows[-1]]))
        else:
            rows.append([box])
            running_centers.append(center)
    return rows
833
+
834
+
835
def analyze_raster_candidate(
    *,
    text_mask: np.ndarray,
    line_mask: np.ndarray,
    x: int,
    y: int,
    width_px_box: int,
    height_px_box: int,
    scale_x: float,
    scale_y: float,
    page_height: float,
) -> Optional[RasterCandidate]:
    """Score one pixel-space bounding box and decide whether it looks tabular.

    Works on crops of the binary text/line masks. Returns None when the crop
    looks like prose (dense full-width text, no ruling lines, few clusters
    per row), like pure line art, or has too few rows; otherwise returns a
    RasterCandidate with its rect converted to page coordinates.
    """
    try:
        import cv2  # type: ignore
    except ImportError as exc:  # pragma: no cover - depends on environment
        raise RuntimeError("Raster fallback requires cv2 (opencv-python-headless).") from exc

    text_crop = text_mask[y : y + height_px_box, x : x + width_px_box]
    line_crop = line_mask[y : y + height_px_box, x : x + width_px_box]
    if text_crop.size == 0:
        return None

    # Dilate so characters merge into word/number clusters before labeling.
    cluster_kernel = cv2.getStructuringElement(
        cv2.MORPH_RECT,
        (max(4, width_px_box // 120), max(2, height_px_box // 160)),
    )
    clustered = cv2.dilate(text_crop, cluster_kernel, iterations=1)
    label_count, _labels, stats, _centroids = cv2.connectedComponentsWithStats(clustered, connectivity=8)

    cluster_boxes: List[Tuple[int, int, int, int]] = []
    for label_index in range(1, label_count):
        comp_x, comp_y, comp_w, comp_h, comp_area = stats[label_index]
        # Drop speckle-sized components.
        if comp_area < 20 or comp_w < 4 or comp_h < 4:
            continue
        cluster_boxes.append((comp_x, comp_y, comp_x + comp_w, comp_y + comp_h))

    row_groups = group_component_boxes_into_rows(
        cluster_boxes,
        y_tolerance=max(8.0, height_px_box * 0.01),
    )
    populated_rows = [row for row in row_groups if row]
    row_count = len(populated_rows)
    avg_clusters_per_row = (
        float(np.mean([len(row) for row in populated_rows])) if populated_rows else 0.0
    )
    line_density = float((line_crop > 0).mean())
    text_density = float((text_crop > 0).mean())

    # Narrative paragraphs tend to have dense full-width text and fewer
    # disconnected column clusters per row than real tables.
    if line_density < 0.003 and avg_clusters_per_row < 2.4:
        return None
    if text_density > 0.09 and line_density < 0.003:
        return None
    if line_density > 0.12:
        # Mostly ruling lines / borders: likely a figure, not a table.
        return None
    if row_count < 4:
        return None

    candidate_box = (
        x * scale_x,
        y * scale_y,
        (x + width_px_box) * scale_x,
        (y + height_px_box) * scale_y,
    )
    score = (
        (avg_clusters_per_row * 8.0)
        + (line_density * 220.0)
        + (text_density * 35.0)
        + (height(candidate_box) / max(1.0, page_height) * 8.0)
    )
    return RasterCandidate(
        rect=candidate_box,
        score=score,
        row_count=row_count,
        avg_components_per_row=avg_clusters_per_row,
        text_density=text_density,
        line_density=line_density,
    )
913
+
914
+
915
def detect_raster_table_regions(page: fitz.Page) -> Dict[str, Any]:
    """Detect table-like regions by rasterizing the page and using morphology.

    Pipeline: render -> Otsu binarize -> split into a line mask (long
    horizontal/vertical morphological opens) and a text mask -> dilate into
    candidate blobs -> score each blob via analyze_raster_candidate ->
    greedy IoU dedupe, keeping at most six regions.
    """
    try:
        import cv2  # type: ignore
    except ImportError as exc:  # pragma: no cover - depends on environment
        raise RuntimeError("Raster fallback requires cv2 (opencv-python-headless).") from exc

    timings: Dict[str, float] = {}
    overall_start = time.perf_counter()

    stage_start = time.perf_counter()
    image = render_page_image(page, zoom=2.0)
    timings["render_ms"] = (time.perf_counter() - stage_start) * 1000.0

    stage_start = time.perf_counter()
    rgb = np.asarray(image, dtype=np.uint8)
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
    _threshold, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    timings["binarize_ms"] = (time.perf_counter() - stage_start) * 1000.0

    stage_start = time.perf_counter()
    height_px, width_px = gray.shape[:2]
    # Long thin openings keep ruling lines while discarding glyph strokes.
    horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (max(40, width_px // 25), 1))
    vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, max(40, height_px // 25)))
    horizontal = cv2.morphologyEx(binary, cv2.MORPH_OPEN, horizontal_kernel)
    vertical = cv2.morphologyEx(binary, cv2.MORPH_OPEN, vertical_kernel)
    line_mask = cv2.bitwise_or(horizontal, vertical)
    text_mask = cv2.subtract(binary, line_mask)
    text_kernel = cv2.getStructuringElement(
        cv2.MORPH_RECT, (max(8, width_px // 180), max(6, height_px // 180))
    )
    grouped_text = cv2.dilate(text_mask, text_kernel, iterations=1)
    candidate_mask = cv2.dilate(
        cv2.bitwise_or(grouped_text, line_mask),
        cv2.getStructuringElement(cv2.MORPH_RECT, (max(20, width_px // 80), max(20, height_px // 80))),
        iterations=1,
    )
    contours, _hierarchy = cv2.findContours(candidate_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    timings["morphology_ms"] = (time.perf_counter() - stage_start) * 1000.0

    scale_x = float(page.rect.width) / float(width_px)
    scale_y = float(page.rect.height) / float(height_px)
    candidates: List[RasterCandidate] = []
    for contour in contours:
        x, y, box_width, box_height = cv2.boundingRect(contour)
        # Ignore blobs too small to be a page-level table.
        if box_width < (width_px * 0.20) or box_height < (height_px * 0.05):
            continue
        candidate = analyze_raster_candidate(
            text_mask=text_mask,
            line_mask=line_mask,
            x=x,
            y=y,
            width_px_box=box_width,
            height_px_box=box_height,
            scale_x=scale_x,
            scale_y=scale_y,
            page_height=float(page.rect.height),
        )
        if candidate is not None:
            candidates.append(candidate)

    # Greedy IoU suppression: best-scored candidates win overlapping regions.
    kept: List[RasterCandidate] = []
    for candidate in sorted(candidates, key=lambda item: item.score, reverse=True):
        if all(bbox_iou(candidate.rect, survivor.rect) < 0.85 for survivor in kept):
            kept.append(candidate)

    timings["total_detection_ms"] = (time.perf_counter() - overall_start) * 1000.0
    return {
        "mode": "image_morphology",
        "tables": kept[:6],
        "timings_ms": timings,
    }
985
+
986
+
987
def detect_tables_on_page(page: fitz.Page) -> Dict[str, Any]:
    """Detect tables on a page, preferring native PDF spans over rasterizing.

    Returns the native detection payload when the page has enough native text
    spans to trust the span-based detector, or when the native pass found any
    tables despite sparse text. Only otherwise does it fall back to the
    raster (image morphology) detector.

    Fix: the original unconditionally ran the expensive raster pass before
    checking ``native_result["tables"]``, rendering and analyzing the page
    image even when its result was then discarded. The raster detector is now
    invoked only when its result can actually be returned; the returned
    payload is identical for every input.
    """
    native_result = detect_native_tables(page)
    # Enough native text spans: the span-based result is authoritative.
    if len(native_result["fragments"]) >= MIN_NATIVE_SPAN_COUNT:
        return native_result
    # Sparse text, but native detection still found tables: keep them and
    # skip the raster pass entirely.
    if native_result["tables"]:
        return native_result
    return detect_raster_table_regions(page)
997
+
998
+
999
def build_table_payload(candidate: TableCandidate) -> Dict[str, Any]:
    """Serialize a table candidate, attaching its inferred cells and, when
    renderable, an HTML fragment under the ``html`` key."""
    payload = candidate.to_payload()
    payload["cells"] = [cell.to_payload() for cell in infer_table_candidate_cells(candidate)]
    html = render_table_candidate_html(candidate)
    if html:
        payload["html"] = html
    return payload
1006
+
1007
+
1008
def save_overlay(
    page: fitz.Page,
    *,
    detection_payload: Dict[str, Any],
    output_path: Path,
) -> None:
    """Write a debug PNG with the detected tables drawn on the rendered page.

    Native-mode payloads additionally draw row boxes and per-fragment boxes
    (green for bold fragments, blue otherwise); raster-mode payloads draw
    only the table rectangles.
    """
    image = render_page_image(page, zoom=2.0)
    draw = ImageDraw.Draw(image)
    scale_x = float(image.width) / float(page.rect.width)
    scale_y = float(image.height) / float(page.rect.height)

    def to_pixels(box: Sequence[float]) -> Tuple[float, float, float, float]:
        # Convert a page-space box to image-space pixels.
        return (
            float(box[0]) * scale_x,
            float(box[1]) * scale_y,
            float(box[2]) * scale_x,
            float(box[3]) * scale_y,
        )

    if detection_payload.get("mode") == "pdf_native":
        for table in detection_payload.get("tables", []):
            draw.rectangle(to_pixels(table.rect), outline=(220, 40, 40), width=5)
            for row in table.rows:
                draw.rectangle(to_pixels(row.bbox), outline=(255, 140, 0), width=2)
                for fragment in row.items:
                    outline_color = (40, 170, 40) if fragment.bold else (40, 120, 220)
                    draw.rectangle(to_pixels(fragment.bbox), outline=outline_color, width=1)
    else:
        for table in detection_payload.get("tables", []):
            box = table.rect if isinstance(table, RasterCandidate) else table["bbox"]
            draw.rectangle(to_pixels(box), outline=(220, 40, 40), width=5)
    output_path.parent.mkdir(parents=True, exist_ok=True)
    image.save(output_path)
1041
+
1042
+
1043
def build_payload(
    pdf_path: Path,
    *,
    page_number: int,
    detection_payload: Dict[str, Any],
) -> Dict[str, Any]:
    """Shape a page-level detection result into a JSON-serializable payload.

    Timing values are rounded to two decimals. Native-mode payloads include
    the fragment count and collect the non-empty table HTML fragments;
    raster-mode payloads serialize their candidates and leave
    ``html_fragments`` empty.
    """
    raw_timings = dict(detection_payload.get("timings_ms") or {})
    payload: Dict[str, Any] = {
        "pdf_path": str(pdf_path),
        "page_number": int(page_number),
        "mode": detection_payload.get("mode"),
        "timings_ms": {name: round(float(value), 2) for name, value in raw_timings.items()},
    }
    tables = detection_payload.get("tables") or []
    if detection_payload.get("mode") == "pdf_native":
        payload["native_fragment_count"] = len(detection_payload.get("fragments") or [])
        payload["tables"] = [build_table_payload(table) for table in tables]
        payload["html_fragments"] = [
            str(entry["html"])
            for entry in payload["tables"]
            if isinstance(entry, dict) and isinstance(entry.get("html"), str) and entry.get("html")
        ]
    else:
        payload["tables"] = [table.to_payload() for table in tables]
        payload["html_fragments"] = []
    return payload
1070
+
1071
+
1072
def detect_tables_for_page_number(document: fitz.Document, *, page_number: int) -> Dict[str, Any]:
    """Load a 1-based page from an open document and run table detection on it.

    Also records the combined load+dispatch wall time into the payload's
    ``timings_ms`` under ``open_and_dispatch_ms``.
    """
    dispatch_start = time.perf_counter()
    page = document.load_page(page_number - 1)
    detection_payload = detect_tables_on_page(page)
    detection_payload.setdefault("timings_ms", {})
    detection_payload["timings_ms"]["open_and_dispatch_ms"] = (
        time.perf_counter() - dispatch_start
    ) * 1000.0
    return detection_payload
1079
+
1080
+
1081
def extract_tables_from_pdf_page(
    pdf_path: Path | str,
    *,
    page_number: int,
    overlay_path: Optional[Path | str] = None,
) -> Dict[str, Any]:
    """Detect tables on one page of a PDF, optionally writing a debug overlay PNG.

    Returns the JSON-serializable page payload from build_payload.
    """
    resolved_pdf_path = Path(pdf_path).resolve()
    with fitz.open(str(resolved_pdf_path)) as document:
        detection_payload = detect_tables_for_page_number(document, page_number=page_number)
        if overlay_path:
            overlay_start = time.perf_counter()
            page = document.load_page(page_number - 1)
            save_overlay(
                page,
                detection_payload=detection_payload,
                output_path=Path(overlay_path).resolve(),
            )
            detection_payload["timings_ms"]["overlay_ms"] = (time.perf_counter() - overlay_start) * 1000.0
        return build_payload(resolved_pdf_path, page_number=page_number, detection_payload=detection_payload)
1096
+
1097
+
1098
def summarize_document_payloads(page_payloads: Sequence[Dict[str, Any]]) -> Dict[str, Any]:
    """Aggregate per-page payloads into document-level counts and latency stats.

    Latency stats (median/p95/max, rounded to 2 decimals) are computed over
    the numeric ``open_and_dispatch_ms`` timings; the summary dict is empty
    when no page has one.
    """
    latencies: List[float] = []
    for page_payload in page_payloads:
        dispatch_ms = page_payload.get("timings_ms", {}).get("open_and_dispatch_ms")
        if isinstance(dispatch_ms, (int, float)):
            latencies.append(float(dispatch_ms))
    if latencies:
        samples = np.asarray(latencies, dtype=float)
        latency_summary = {
            "median_ms": round(float(np.median(samples)), 2),
            "p95_ms": round(float(np.percentile(samples, 95)), 2),
            "max_ms": round(float(np.max(samples)), 2),
        }
    else:
        latency_summary = {}
    modes = [page_payload.get("mode") for page_payload in page_payloads]
    return {
        "pages": len(page_payloads),
        "pages_with_tables": sum(bool(page_payload.get("tables")) for page_payload in page_payloads),
        "native_pages": sum(mode == "pdf_native" for mode in modes),
        "raster_pages": sum(mode == "image_morphology" for mode in modes),
        "latency_ms": latency_summary,
    }
1120
+
1121
+
1122
def extract_tables_from_pdf_document(
    pdf_path: Path | str,
    *,
    page_numbers: Optional[Sequence[int]] = None,
) -> Dict[str, Any]:
    """Run table detection over a whole PDF, or a subset of its pages.

    Args:
        pdf_path: Path to the PDF file.
        page_numbers: Optional 1-based page numbers; out-of-range entries are
            silently dropped. Defaults to every page.

    Returns:
        Dict with the resolved path, the processed page count, the per-page
        payloads, and a document-level summary.

    Fix: the original validated entries with ``int(page_number)`` but then
    forwarded the *raw* value into ``load_page(page_number - 1)``, so an
    int-like input (e.g. ``"3"``) passed validation and crashed later.
    Selected pages are now normalized to ``int`` once.
    """
    resolved_pdf_path = Path(pdf_path).resolve()
    with fitz.open(str(resolved_pdf_path)) as document:
        total_pages = int(document.page_count)
        if page_numbers is None:
            selected_pages = list(range(1, total_pages + 1))
        else:
            selected_pages = [
                int(page_number)
                for page_number in page_numbers
                if 1 <= int(page_number) <= total_pages
            ]
        page_payloads = [
            build_payload(
                resolved_pdf_path,
                page_number=page_number,
                detection_payload=detect_tables_for_page_number(document, page_number=page_number),
            )
            for page_number in selected_pages
        ]
    return {
        "pdf_path": str(resolved_pdf_path),
        "page_count": len(page_payloads),
        "pages": page_payloads,
        "summary": summarize_document_payloads(page_payloads),
    }
1149
+
1150
+
1151
def extract_table_html_fragments_from_pdf_page(
    pdf_path: Path | str,
    *,
    page_number: int,
) -> List[str]:
    """Convenience wrapper returning only the non-blank HTML table fragments
    detected on the given 1-based page."""
    page_payload = extract_tables_from_pdf_page(pdf_path, page_number=page_number)
    fragments = page_payload.get("html_fragments") or []
    return [str(fragment) for fragment in fragments if str(fragment).strip()]
1158
+
1159
+
1160
def parse_args() -> argparse.Namespace:
    """Build the CLI argument parser and parse ``sys.argv``."""
    parser = argparse.ArgumentParser(
        description=(
            "Fast table bbox/style extraction for PDF pages using native PDF"
            " spans and a lightweight raster fallback."
        )
    )
    parser.add_argument("--pdf", required=True, help="Path to the input PDF.")
    parser.add_argument("--page", type=int, help="1-based page number.")
    parser.add_argument(
        "--all-pages",
        action="store_true",
        help="Process the full PDF instead of a single page.",
    )
    parser.add_argument("--output-json", help="Optional JSON output path.")
    parser.add_argument("--overlay-png", help="Optional debug overlay PNG path.")
    return parser.parse_args()
1170
+
1171
+
1172
def main() -> None:
    """CLI entry point: run single-page or whole-document extraction, print the
    JSON payload, and optionally write it to ``--output-json``."""
    args = parse_args()
    pdf_path = Path(args.pdf).resolve()
    if not args.all_pages and args.page is None:
        raise SystemExit("Pass --page N for a single page or --all-pages for a full-document run.")

    if args.all_pages:
        if args.overlay_png:
            raise SystemExit("--overlay-png is only supported with single-page mode.")
        payload = extract_tables_from_pdf_document(pdf_path)
    else:
        overlay = Path(args.overlay_png).resolve() if args.overlay_png else None
        payload = extract_tables_from_pdf_page(
            pdf_path,
            page_number=max(1, int(args.page)),
            overlay_path=overlay,
        )

    rendered = json.dumps(payload, indent=2, sort_keys=True)
    print(rendered)
    if args.output_json:
        output_path = Path(args.output_json).resolve()
        output_path.parent.mkdir(parents=True, exist_ok=True)
        output_path.write_text(rendered + "\n", encoding="utf-8")


if __name__ == "__main__":
    main()
requirements.txt ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ annotated-types==0.7.0
2
+ anyio==4.12.0
3
+ beautifulsoup4==4.12.3
4
+ certifi==2025.11.12
5
+ charset-normalizer==3.4.4
6
+ eval_type_backport==0.3.1
7
+ greenlet==3.1.1
8
+ huggingface_hub==1.7.2
9
+ h11==0.16.0
10
+ httpcore==1.0.9
11
+ httpx==0.28.1
12
+ idna==3.11
13
+ imgkit==1.2.3
14
+ invoke==2.2.1
15
+ lxml==6.0.0
16
+ mistralai==1.9.11
17
+ numpy==2.3.5
18
+ pandas==2.3.3
19
+ pillow==12.0.0
20
+ playwright==1.48.0
21
+ pydantic==2.12.5
22
+ pydantic_core==2.41.5
23
+ pyee==12.0.0
24
+ PyMuPDF==1.24.11
25
+ PyPDF2==3.0.1
26
+ python-dateutil==2.9.0.post0
27
+ python-dotenv==1.0.1
28
+ pytz==2025.2
29
+ PyYAML==6.0.2
30
+ requests==2.32.3
31
+ setuptools==80.9.0
32
+ six==1.17.0
33
+ soupsieve==2.8
34
+ tabulate==0.9.0
35
+ accelerate==1.13.0
36
+ qwen-vl-utils==0.0.14
37
+ safetensors==0.7.0
38
+ torch==2.11.0
39
+ torchvision==0.26.0
40
+ transformers==5.3.0
41
+ typing_extensions==4.15.0
42
+ typing-inspection==0.4.2
43
+ tzdata==2025.2
44
+ urllib3==2.6.0
45
+ wheel==0.45.1
sec_parser/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ """Core parser package for SEC Edgar Filings Datasets."""
2
+
sec_parser/config.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+
4
+ def _env_flag(name: str, default: str = "0") -> bool:
5
+ return os.getenv(name, default).strip().lower() in {"1", "true", "yes", "on"}
6
+
7
+
8
+ def _env_int(name: str, default: int) -> int:
9
+ raw = os.getenv(name)
10
+ if raw is None or not raw.strip():
11
+ return default
12
+ return int(raw.strip())
13
+
14
+
15
+ class Config:
16
+
17
+ HTML_TIMEOUT_LIMIT = 15 # overall timeout for parsing a single HTML document (in minutes)
18
+ PDF_TIMEOUT_LIMIT = 12 # timeout for processing all PDF attachments within a filing (in minutes)
19
+
20
+ OCR_MODEL = os.getenv("MISTRAL_OCR_MODEL", "mistral-ocr-latest") # model used for OCR on PDFs and rendered page images
21
+
22
+ # Legacy settings for separate OCR-bench utilities. sec_parser no longer
23
+ # calls FireRed/Qwen for table-specific second-pass transcription.
24
+ FIRERED_MODEL_LOCAL_DIR = os.getenv("FIRERED_MODEL_LOCAL_DIR", "").strip() or None
25
+ FIRERED_MODEL_CACHE_DIR = os.getenv("FIRERED_MODEL_CACHE_DIR", "").strip() or None
26
+ FIRERED_MODEL_REVISION = os.getenv("FIRERED_MODEL_REVISION", "").strip() or None
27
+ FIRERED_LOCAL_FILES_ONLY = _env_flag("FIRERED_LOCAL_FILES_ONLY")
28
+ FIRERED_DEVICE = os.getenv("FIRERED_DEVICE", "auto").strip().lower() or "auto"
29
+ FIRERED_DEVICE_MAP = os.getenv("FIRERED_DEVICE_MAP", "auto").strip() or "auto"
30
+ FIRERED_MAX_NEW_TOKENS = _env_int("FIRERED_MAX_NEW_TOKENS", 4096)
31
+ FIRERED_MAX_IMAGE_PIXELS = _env_int("FIRERED_MAX_IMAGE_PIXELS", 0)
32
+ FIRERED_MPS_AUTO_MAX_IMAGE_PIXELS = _env_int("FIRERED_MPS_AUTO_MAX_IMAGE_PIXELS", 2000000)
33
+
34
+ PER_TABLE_SLEEP_SECONDS = 0.375 # legacy delay for old second-pass table OCR utilities
35
+ API_MAX_RETRIES = 5 # max number of retries for failed OCR API calls
36
+ API_INITIAL_DELAY_SECONDS = 0.5 # initial delay before the first API retry; increases exponentially
37
+
38
+ PDF_BATCH_SIZE = 10 # number of PDF pages to process in a single batch to the OCR API (10 was found to be the most efficient on my machine)
39
+ IMAGE_RENDER_ZOOM = 3 # zoom factor for rendering PDF pages to images for OCR (probably wouldn't touch this)
sec_parser/hardcodes.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ from typing import List, Sequence
3
+
4
+
5
+ _STACKED_HEADER_COLLAPSES = [
6
+ {
7
+ "name": "net_unrealized_appreciation_depreciation",
8
+ "rows": [
9
+ ["", "Net Unrealized", ""],
10
+ ["", "Appreciation", ""],
11
+ ["", "(Depreciation)", "Net Unrealized"],
12
+ ["", "as a % of", "Appreciation"],
13
+ ["", "Trust Capital", "(Depreciation)"],
14
+ ],
15
+ "replacement": [
16
+ "",
17
+ "**Net Unrealized<br>Appreciation<br>(Depreciation)<br>as a % of<br>Trust Capital**",
18
+ "**Net Unrealized<br>Appreciation<br>(Depreciation)**",
19
+ ],
20
+ },
21
+ ]
22
+
23
+
24
+ def _split_markdown_row(line: str) -> List[str] | None:
25
+ stripped = line.strip()
26
+ if not stripped.startswith("|") or not stripped.endswith("|"):
27
+ return None
28
+ return [cell.strip() for cell in stripped[1:-1].split("|")]
29
+
30
+
31
+ def _normalize_match_cell(cell: str) -> str:
32
+ cell = re.sub(r"</?u>", "", cell, flags=re.IGNORECASE)
33
+ cell = re.sub(r"(?i)<br\s*/?>", " ", cell)
34
+ cell = cell.replace("**", "")
35
+ cell = cell.replace("&nbsp;", " ")
36
+ cell = re.sub(r"\s+", " ", cell)
37
+ if cell.strip().lower() == "nan":
38
+ return ""
39
+ return cell.strip()
40
+
41
+
42
+ def _row_matches(line: str, expected_cells: Sequence[str]) -> bool:
43
+ cells = _split_markdown_row(line)
44
+ if cells is None or len(cells) != len(expected_cells):
45
+ return False
46
+ return all(
47
+ _normalize_match_cell(actual) == expected
48
+ for actual, expected in zip(cells, expected_cells)
49
+ )
50
+
51
+
52
+ def _format_markdown_row(cells: Sequence[str]) -> str:
53
+ return "| " + " | ".join(cells) + " |"
54
+
55
+
56
+ def apply_markdown_hardcodes(markdown_text: str) -> str:
57
+ """
58
+ Apply narrow markdown-level hardcoded fixes for recurring parser edge cases.
59
+ These are intentionally exact-pattern transforms so they do not affect
60
+ unrelated tables.
61
+ """
62
+ lines = markdown_text.splitlines()
63
+ rewritten: List[str] = []
64
+ i = 0
65
+
66
+ while i < len(lines):
67
+ matched = False
68
+
69
+ for hardcode in _STACKED_HEADER_COLLAPSES:
70
+ expected_rows = hardcode["rows"]
71
+ span = len(expected_rows)
72
+ if i + span > len(lines):
73
+ continue
74
+
75
+ if all(_row_matches(lines[i + offset], expected_rows[offset]) for offset in range(span)):
76
+ rewritten.append(_format_markdown_row(hardcode["replacement"]))
77
+ i += span
78
+ matched = True
79
+ break
80
+
81
+ if matched:
82
+ continue
83
+
84
+ rewritten.append(lines[i])
85
+ i += 1
86
+
87
+ return "\n".join(rewritten)
sec_parser/sec_parser.py ADDED
The diff for this file is too large to render. See raw diff
 
sec_parser/special_chars.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Translation tables for the Microsoft symbol fonts. Outer key is the font
# family name in lowercase; inner key is the character the document actually
# contains (the cp1252/ASCII code the font re-renders as a symbol), and the
# value is the closest standard Unicode code point for that glyph.
# NOTE(review): some high-range keys are unusual characters (e.g. a soft
# hyphen or non-breaking space key) — these mirror cp1252 byte positions.
# assumes lookups lowercase the font-family name first — TODO confirm caller.
WINGDINGS_MAP = {
    # Wingdings (1): office objects, hands, zodiac signs, bullets, arrows.
    "wingdings": {
        ' ': '\u0020', '!': '✏', '"': '✂', '#': '✁', '$': '👓', '%': '🔔',
        '&': '📖', '\'': '🕯', '(': '🕿', ')': '✆', '*': '🖂', '+': '🖃',
        ',': '📪', '-': '📫', '.': '📬', '/': '📭', '0': '📁', '1': '📂',
        '2': '📄', '3': '🗏', '4': '🗐', '5': '🗄', '6': '⌛', '7': '🖮',
        '8': '🖰', '9': '🖲', ':': '🖳', ';': '🖴', '<': '🖫', '=': '🖬',
        '>': '✇', '?': '✍', '@': '🖎', 'A': '✌', 'B': '👌', 'C': '👍',
        'D': '👎', 'E': '☜', 'F': '☞', 'G': '☝', 'H': '☟', 'I': '🖐',
        'J': '☺', 'K': '😐', 'L': '☹', 'M': '💣', 'N': '☠', 'O': '🏳',
        'P': '🏱', 'Q': '✈', 'R': '☼', 'S': '💧', 'T': '❄', 'U': '🕆',
        'V': '✞', 'W': '🕈', 'X': '✠', 'Y': '✡', 'Z': '☪', '[': '☯',
        '\\': 'ॐ', ']': '☸', '^': '♈', '_': '♉', '`': '♊', 'a': '♋',
        'b': '♌', 'c': '♍', 'd': '♎', 'e': '♏', 'f': '♐', 'g': '♑',
        'h': '♒', 'i': '♓', 'j': '🙰', 'k': '🙵', 'l': '●', 'm': '🔾',
        'n': '■', 'o': '□', 'p': '🞐', 'q': '❑', 'r': '❒', 's': '⬧',
        't': '⧫', 'u': '◆', 'v': '❖', 'w': '⬥', 'x': '⌧', 'y': '⮹',
        'z': '⌘', '{': '🏵', '|': '🏶', '}': '🙶', '~': '🙷',
        # 0x80-0xA0 keyed via chr() because these are C1-range code points.
        chr(128): '⓪', chr(129): '①', chr(130): '②', chr(131): '③', chr(132): '④',
        chr(133): '⑤', chr(134): '⑥', chr(135): '⑦', chr(136): '⑧', chr(137): '⑨',
        chr(138): '⑩', chr(139): '⓿', chr(140): '❶', chr(141): '❷', chr(142): '❸',
        chr(143): '❹', chr(144): '❺', chr(145): '❻', chr(146): '❼', chr(147): '❽',
        chr(148): '❾', chr(149): '❿', chr(150): '🙢', chr(151): '🙠', chr(152): '🙡',
        chr(153): '🙣', chr(154): '🙞', chr(155): '🙜', chr(156): '🙝', chr(157): '🙟',
        chr(158): '·', chr(159): '•', chr(160): '▪', '¡': '⚪', '¢': '🞆',
        '£': '🞈', '¤': '◉', '¥': '◎', '¦': '🔿', '§': '▪', '¨': '◻',
        '©': '🟂', 'ª': '✦', '«': '★', '¬': '✶', '­': '✴', '®': '✹',
        '¯': '✵', '°': '⯐', '±': '⌖', '²': '⟡', '³': '⌑', '´': '⯑',
        'µ': '✪', '¶': '✰', '·': '🕐', '¸': '🕑', '¹': '🕒', 'º': '🕓',
        '»': '🕔', '¼': '🕕', '½': '🕖', '¾': '🕗', '¿': '🕘', 'À': '🕙',
        'Á': '🕚', 'Â': '🕛', 'Ã': '⮰', 'Ä': '⮱', 'Å': '⮲', 'Æ': '⮳',
        'Ç': '⮴', 'È': '⮵', 'É': '⮶', 'Ê': '⮷', 'Ë': '🙪', 'Ì': '🙫',
        'Í': '🙕', 'Î': '🙔', 'Ï': '🙗', 'Ð': '🙖', 'Ñ': '🙐', 'Ò': '🙑',
        'Ó': '🙒', 'Ô': '🙓', 'Õ': '⌫', 'Ö': '⌦', '×': '⮘', 'Ø': '⮚',
        'Ù': '⮙', 'Ú': '⮛', 'Û': '⮈', 'Ü': '⮊', 'Ý': '⮉', 'Þ': '⮋',
        'ß': '🡨', 'à': '🡪', 'á': '🡩', 'â': '🡫', 'ã': '🡬', 'ä': '🡭',
        'å': '🡯', 'æ': '🡮', 'ç': '🡸', 'è': '🡺', 'é': '🡹', 'ê': '🡻',
        'ë': '🡼', 'ì': '🡽', 'í': '🡿', 'î': '🡾', 'ï': '⇦', 'ð': '⇨',
        'ñ': '⇧', 'ò': '⇩', 'ó': '⬄', 'ô': '⇳', 'õ': '⬀', 'ö': '⬁',
        '÷': '⬃', 'ø': '⬂', 'ù': '🢬', 'ú': '🢭', 'û': '🗶', 'ü': '✔',
        'ý': '🗷', 'þ': '🗹', 'ÿ': '',
    },
    # Wingdings 2: pens, check/cross marks, numbered bullets, stars.
    "wingdings 2": {
        ' ': ' ', '!': '🖊', '"': '🖋', '#': '🖌', '$': '🖍', '%': '✄',
        '&': '✀', '\'': '🕾', '(': '🕽', ')': '🗅', '*': '🗆', '+': '🗇',
        ',': '🗈', '-': '🗉', '.': '🗊', '/': '🗋', '0': '🗌', '1': '🗍',
        '2': '📋', '3': '🗑', '4': '🗔', '5': '🖵', '6': '🖶', '7': '🖷',
        '8': '🖸', '9': '🖭', ':': '🖯', ';': '🖱', '<': '🖒', '=': '🖓',
        '>': '🖘', '?': '🖙', '@': '🖚', 'A': '🖛', 'B': '👈', 'C': '👉',
        'D': '🖜', 'E': '🖝', 'F': '🖞', 'G': '🖟', 'H': '🖠', 'I': '🖡',
        'J': '👆', 'K': '👇', 'L': '🖢', 'M': '🖣', 'N': '🖑', 'O': '🗴',
        'P': '✓', 'Q': '🗵', 'R': '☑', 'S': '☒', 'T': '☒', 'U': '⮾',
        'V': '⮿', 'W': '⦸', 'X': '⦸', 'Y': '🙱', 'Z': '🙴', '[': '🙲',
        '\\': '🙳', ']': '‽', '^': '🙹', '_': '🙺', '`': '🙻', 'a': '🙦',
        'b': '🙤', 'c': '🙥', 'd': '🙧', 'e': '🙚', 'f': '🙘', 'g': '🙙',
        'h': '🙛', 'i': '⓪', 'j': '①', 'k': '②', 'l': '③', 'm': '④',
        'n': '⑤', 'o': '⑥', 'p': '⑦', 'q': '⑧', 'r': '⑨', 's': '⑩',
        't': '⓿', 'u': '❶', 'v': '❷', 'w': '❸', 'x': '❹', 'y': '❺',
        'z': '❻', '{': '❼', '|': '❽', '}': '❾', '~': '❿',
        '€': '☉', '‚': '☽', 'ƒ': '☾', '„': '⸿', '…': '✝', '†': '🕇',
        '‡': '🕜', 'ˆ': '🕝', '‰': '🕞', 'Š': '🕟', '‹': '🕠', 'Œ': '🕡',
        'Ž': '🕣', '‘': '🕦', '’': '🕧', '“': '🙨', '”': '🙩', '•': '•',
        '–': '●', '—': '⚫', '˜': '⬤', '™': '🞅', 'š': '🞆', '›': '🞇',
        'œ': '🞈', 'ž': '⦿', 'Ÿ': '◾', ' ': '■', '¡': '◼', '¢': '⬛',
        '£': '⬜', '¤': '🞑', '¥': '🞒', '¦': '🞓', '§': '🞔', '¨': '▣',
        '©': '🞕', 'ª': '🞖', '«': '🞗', '¬': '⬩', '­': '⬥', '®': '◆',
        '¯': '◇', '°': '🞚', '±': '◈', '²': '🞛', '³': '🞜', '´': '🞝',
        'µ': '⬪', '¶': '⬧', '·': '⧫', '¸': '◊', '¹': '🞠', 'º': '◖',
        '»': '◗', '¼': '⯊', '½': '⯋', '¾': '◼', '¿': '⬥', 'À': '⬟',
        'Á': '⯂', 'Â': '⬣', 'Ã': '⬢', 'Ä': '⯃', 'Å': '⯄', 'Æ': '🞡',
        'Ç': '🞢', 'È': '🞣', 'É': '🞤', 'Ê': '🞥', 'Ë': '🞦', 'Ì': '🞧',
        'Í': '🞨', 'Î': '🞩', 'Ï': '🞪', 'Ð': '🞫', 'Ñ': '🞬', 'Ò': '🞭',
        'Ó': '🞮', 'Ô': '🞯', 'Õ': '🞰', 'Ö': '🞱', '×': '🞲', 'Ø': '🞳',
        'Ù': '🞴', 'Ú': '🞵', 'Û': '🞶', 'Ü': '🞷', 'Ý': '🞸', 'Þ': '🞹',
        'ß': '🞺', 'à': '🞻', 'á': '🞼', 'â': '🞽', 'ã': '🞾', 'ä': '🞿',
        'å': '🟀', 'æ': '🟂', 'ç': '🟄', 'è': '✦', 'é': '🟉', 'ê': '★',
        'ë': '✶', 'ì': '🟋', 'í': '✷', 'î': '🟏', 'ï': '🟒', 'ð': '✹',
        'ñ': '🟃', 'ò': '🟇', 'ó': '✯', 'ô': '🟍', 'õ': '🟔', 'ö': '⯌',
        '÷': '⯍', 'ø': '※', 'ù': '⁂',
    },
    # Wingdings 3: arrow variants only.
    "wingdings 3": {
        ' ': ' ', '!': '⭠', '"': '⭢', '#': '⭡', '$': '⭣', '%': '⭦',
        '&': '⭧', '\'': '⭩', '(': '⭨', ')': '⭰', '*': '⭲', '+': '⭱',
        ',': '⭳', '-': '⭶', '.': '⭸', '/': '⭻', '0': '⭽', '1': '⭤',
        '2': '⭥', '3': '⭪', '4': '⭬', '5': '⭫', '6': '⭭', '7': '⭍',
        '8': '⮠', '9': '⮡', ':': '⮢', ';': '⮣', '<': '⮤', '=': '⮥',
        '>': '⮦', '?': '⮧', '@': '⮐', 'A': '⮑', 'B': '⮒', 'C': '⮓',
        'D': '⮀', 'E': '⮃', 'F': '⭾', 'G': '⭿', 'H': '⮄', 'I': '⮆',
        'J': '⮅', 'K': '⮇', 'L': '⮏', 'M': '⮍', 'N': '⮎', 'O': '⮌',
        'P': '⭮', 'Q': '⭯', 'R': '⎋', 'S': '⌤', 'T': '⌃', 'U': '⌥',
        'V': '⎵', 'W': '⍽', 'X': '⇪', 'Y': '⮸', 'Z': '🢠', '[': '🢡',
        '\\': '🢢', ']': '🢣', '^': '🢤', '_': '🢥', '`': '🢦', 'a': '🢧',
        'b': '🢨', 'c': '🢩', 'd': '🢪', 'e': '🢫', 'f': '←', 'g': '→',
        'h': '↑', 'i': '↓', 'j': '↖', 'k': '↗', 'l': '↙', 'm': '↘',
        'n': '🡘', 'o': '🡙', 'p': '▲', 'q': '▼', 'r': '△', 's': '▽',
        't': '◄', 'u': '►', 'v': '◁', 'w': '▷', 'x': '◣', 'y': '◢',
        'z': '◤', '{': '◥', '|': '🞀', '}': '🞂', '~': '🞁',
        '€': '🞃', '‚': '▲', 'ƒ': '◀', '„': '▶', '…': '⮜', '†': '⮞',
        '‡': '⮝', 'ˆ': '⮟', '‰': '🠐', 'Š': '🠒', '‹': '🠑', 'Œ': '🠓',
        'Ž': '🠖', '‘': '🠘', '’': '🠚', '“': '🠙', '”': '🠛', '•': '🠜',
        '–': '🠞', '—': '🠝', '˜': '🠟', '™': '🠀', 'š': '🠂', '›': '🠁',
        'œ': '🠃', 'ž': '🠆', 'Ÿ': '🠅', ' ': '🠇', '¡': '🠈', '¢': '🠊',
        '£': '🠉', '¤': '🠋', '¥': '🠠', '¦': '🠢', '§': '🠤', '¨': '🠦',
        '©': '🠨', 'ª': '🠪', '«': '🠬', '¬': '🢜', '­': '🢝', '®': '🢞',
        '¯': '🢟', '°': '🠮', '±': '🠰', '²': '🠲', '³': '🠴', '´': '🠶',
        'µ': '🠸', '¶': '🠺', '·': '🠹', '¸': '🠻', '¹': '🢘', 'º': '🢚',
        '»': '🢙', '¼': '🢛', '½': '🠼', '¾': '🠾', '¿': '🠽', 'À': '🠿',
        'Á': '🡀', 'Â': '🡂', 'Ã': '🡁', 'Ä': '🡃', 'Å': '🡄', 'Æ': '🡆',
        'Ç': '🡅', 'È': '🡇', 'É': '⮨', 'Ê': '⮩', 'Ë': '⮪', 'Ì': '⮫',
        'Í': '⮬', 'Î': '⮭', 'Ï': '⮮', 'Ð': '⮯', 'Ñ': '🡠', 'Ò': '🡢',
        'Ó': '🡡', 'Ô': '🡣', 'Õ': '🡤', 'Ö': '🡥', '×': '🡧', 'Ø': '🡦',
        'Ù': '🡰', 'Ú': '🡲', 'Û': '🡱', 'Ü': '🡳', 'Ý': '🡴', 'Þ': '🡵',
        'ß': '🡷', 'à': '🡶', 'á': '🢀', 'â': '🢂', 'ã': '🢁', 'ä': '🢃',
        'å': '🢄', 'æ': '🢅', 'ç': '🢇', 'è': '🢆', 'é': '🢐', 'ê': '🢒',
        'ë': '🢑', 'ì': '🢓', 'í': '🢔', 'î': '🢖', 'ï': '🢕', 'ð': '🢗',
    },
    # Webdings: pictographs (buildings, weather, media, transport, animals).
    "webdings": {
        ' ': '\u0020', '!': '🕷', '"': '🕸', '#': '🕲', '$': '🕶', '%': '🏆',
        '&': '🎖', '\'': '🖇', '(': '🗨', ')': '🗩', '*': '🗰', '+': '🗱',
        ',': '🌶', '-': '🎗', '.': '🙾', '/': '🙼', '0': '🗕', '1': '🗖',
        '2': '🗗', '3': '⏴', '4': '⏵', '5': '⏶', '6': '⏷', '7': '⏪',
        '8': '⏩', '9': '⏮', ':': '⏭', ';': '⏸', '<': '⏹', '=': '⏺',
        '>': '🗚', '?': '🗳', '@': '🛠', 'A': '🏗', 'B': '🏘', 'C': '🏙',
        'D': '🏚', 'E': '🏜', 'F': '🏭', 'G': '🏛', 'H': '🏠', 'I': '🏖',
        'J': '🏝', 'K': '🛣', 'L': '🔍', 'M': '🏔', 'N': '👁', 'O': '👂',
        'P': '🏞', 'Q': '🏕', 'R': '🛤', 'S': '🏟', 'T': '🛳', 'U': '🕬',
        'V': '🕫', 'W': '🕨', 'X': '🔈', 'Y': '🎔', 'Z': '🎕', '[': '🗬',
        '\\': '🙽', ']': '🗭', '^': '🗪', '_': '🗫', '`': '⮔', 'a': '✔',
        'b': '🚲', 'c': '□', 'd': '🛡', 'e': '📦', 'f': '🛱', 'g': '■',
        'h': '🚑', 'i': '🛈', 'j': '🛩', 'k': '🛰', 'l': '🟈', 'm': '🕴',
        'n': '⚫', 'o': '🛥', 'p': '🚔', 'q': '🗘', 'r': '🗙', 's': '❓',
        't': '🛲', 'u': '🚇', 'v': '🚍', 'w': '⛳', 'x': '🛇', 'y': '⊖',
        'z': '🚭', '{': '🗮', '|': '|', '}': '🗯', '~': '🗲',
        # 0x80-0xA0 keyed via chr() because these are C1-range code points.
        chr(128): '🚹', chr(129): '🚺', chr(130): '🛉', chr(131): '🛊', chr(132): '🚼',
        chr(133): '👽', chr(134): '🏋', chr(135): '⛷', chr(136): '🏂', chr(137): '🏌',
        chr(138): '🏊', chr(139): '🏄', chr(140): '🏍', chr(141): '🏎', chr(142): '🚘',
        chr(143): '🗠', chr(144): '🛢', chr(145): '💰', chr(146): '🏷', chr(147): '💳',
        chr(148): '👪', chr(149): '🗡', chr(150): '🗢', chr(151): '🗣', chr(152): '✯',
        chr(153): '🖄', chr(154): '🖅', chr(155): '🖃', chr(156): '🖆', chr(157): '🖹',
        chr(158): '🖺', chr(159): '🖻', chr(160): '🕵', '¡': '🕰', '¢': '🖽',
        '£': '🖾', '¤': '📋', '¥': '🗒', '¦': '🗓', '§': '📖', '¨': '📚',
        '©': '🗞', 'ª': '🗟', '«': '🗃', '¬': '🗂', '­': '🖼', '®': '🎭',
        '¯': '🎜', '°': '🎘', '±': '🎙', '²': '🎧', '³': '💿', '´': '🎞',
        'µ': '📷', '¶': '🎟', '·': '🎬', '¸': '📽', '¹': '📹', 'º': '📾',
        '»': '📻', '¼': '🎚', '½': '🎛', '¾': '📺', '¿': '💻', 'À': '🖥',
        'Á': '🖦', 'Â': '🖧', 'Ã': '🕹', 'Ä': '🎮', 'Å': '🕻', 'Æ': '🕼',
        'Ç': '📟', 'È': '🖁', 'É': '🖀', 'Ê': '🖨', 'Ë': '🖩', 'Ì': '🖿',
        'Í': '🖪', 'Î': '🗜', 'Ï': '🔒', 'Ð': '🔓', 'Ñ': '🗝', 'Ò': '📥',
        'Ó': '📤', 'Ô': '🕳', 'Õ': '🌣', 'Ö': '🌤', '×': '🌥', 'Ø': '🌦',
        'Ù': '☁', 'Ú': '🌧', 'Û': '🌨', 'Ü': '🌩', 'Ý': '🌪', 'Þ': '🌬',
        'ß': '🌫', 'à': '🌜', 'á': '🌡', 'â': '🛋', 'ã': '🛏', 'ä': '🍽',
        'å': '🍸', 'æ': '🛎', 'ç': '🛍', 'è': 'Ⓟ', 'é': '♿', 'ê': '🛆',
        'ë': '🖈', 'ì': '🎓', 'í': '🗤', 'î': '🗥', 'ï': '🗦', 'ð': '🗧',
        'ñ': '🛪', 'ò': '🐿', 'ó': '🐦', 'ô': '🐟', 'õ': '🐕', 'ö': '🐈',
        '÷': '🙬', 'ø': '🙮', 'ù': '🙭', 'ú': '🙯', 'û': '🗺', 'ü': '🌍',
        'ý': '🌏', 'þ': '🌎', 'ÿ': '🕊',
    }
}
table_ocr_backends.py ADDED
The diff for this file is too large to render. See raw diff
 
test_sec_parser.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import contextlib
2
+ import io
3
+ import unittest
4
+ from unittest import mock
5
+
6
+ from sec_parser import sec_parser as sp
7
+
8
+
9
+ class NormalizeTextMarkupTests(unittest.TestCase):
10
+ def test_valid_utf8_bypasses_detwingle(self):
11
+ payload = "Revenue — 2025".encode("utf-8")
12
+
13
+ with mock.patch.object(
14
+ sp.UnicodeDammit,
15
+ "detwingle",
16
+ side_effect=AssertionError("detwingle should not be called for valid utf-8"),
17
+ ):
18
+ normalized = sp.normalize_text_markup(payload)
19
+
20
+ self.assertEqual(normalized, "Revenue — 2025")
21
+
22
+ def test_invalid_utf8_still_uses_detwingle_path(self):
23
+ payload = b'\x93quoted\x94'
24
+
25
+ with mock.patch.object(sp.UnicodeDammit, "detwingle", wraps=sp.UnicodeDammit.detwingle) as detwingle:
26
+ normalized = sp.normalize_text_markup(payload)
27
+
28
+ self.assertTrue(detwingle.called)
29
+ self.assertIn("quoted", normalized)
30
+
31
+
32
+ class DebugPrintGatingTests(unittest.TestCase):
33
+ def test_parse_html_filing_stage_debug_is_off_by_default(self):
34
+ html = "<html><body><p>Hello world</p></body></html>"
35
+ stdout = io.StringIO()
36
+
37
+ with mock.patch.dict("os.environ", {}, clear=False):
38
+ with mock.patch.object(sp, "is_document_layout_positioned", return_value=False):
39
+ with mock.patch.object(sp, "parse_positioned_html_islands_via_ocr", return_value=(None, False)):
40
+ with contextlib.redirect_stdout(stdout):
41
+ sp.parse_html_filing(html, form_type="")
42
+
43
+ self.assertNotIn("→ stage 0 (raw):", stdout.getvalue())
44
+
45
+ def test_parse_html_filing_stage_debug_can_be_enabled(self):
46
+ html = "<html><body><p>Hello world</p></body></html>"
47
+ stdout = io.StringIO()
48
+
49
+ with mock.patch.dict("os.environ", {"SEC_PARSER_DEBUG": "1"}, clear=False):
50
+ with mock.patch.object(sp, "is_document_layout_positioned", return_value=False):
51
+ with mock.patch.object(sp, "parse_positioned_html_islands_via_ocr", return_value=(None, False)):
52
+ with contextlib.redirect_stdout(stdout):
53
+ sp.parse_html_filing(html, form_type="")
54
+
55
+ self.assertIn("→ stage 0 (raw):", stdout.getvalue())
56
+
57
+
58
# Allow running the suite directly: `python test_sec_parser.py`.
if __name__ == "__main__":
    unittest.main()