Commit ·
bd12d83
1
Parent(s): 0b77b50
Simplify falcon-ocr.py to plain mode only, add --progress flag
Browse files
Layout mode is currently broken upstream (PPDocLayoutV3ImageProcessorFast
import fails with latest transformers). Removing it for now; can re-enable
once falcon-perception updates.
Also adds --progress flag to expose the engine's per-image tqdm bar,
and replaces script-level batch tqdm with logger.info messages.
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
- falcon-ocr.py +28 -40
falcon-ocr.py
CHANGED
|
@@ -6,8 +6,7 @@
|
|
| 6 |
# "pillow",
|
| 7 |
# "torch>=2.5",
|
| 8 |
# "torchvision",
|
| 9 |
-
# "falcon-perception
|
| 10 |
-
# "tqdm",
|
| 11 |
# ]
|
| 12 |
# ///
|
| 13 |
|
|
@@ -31,9 +30,6 @@ Examples:
|
|
| 31 |
# Basic text OCR
|
| 32 |
uv run falcon-ocr.py input-dataset output-dataset
|
| 33 |
|
| 34 |
-
# Layout-aware OCR
|
| 35 |
-
uv run falcon-ocr.py dense-docs output --task-mode layout
|
| 36 |
-
|
| 37 |
# Test with small sample
|
| 38 |
uv run falcon-ocr.py dataset test --max-samples 5 --shuffle
|
| 39 |
|
|
@@ -58,7 +54,6 @@ import torch
|
|
| 58 |
from datasets import load_dataset
|
| 59 |
from huggingface_hub import DatasetCard, login
|
| 60 |
from PIL import Image
|
| 61 |
-
from tqdm.auto import tqdm
|
| 62 |
|
| 63 |
logging.basicConfig(level=logging.INFO)
|
| 64 |
logger = logging.getLogger(__name__)
|
|
@@ -67,7 +62,6 @@ MODEL_ID = "tiiuae/Falcon-OCR"
|
|
| 67 |
|
| 68 |
TASK_MODES = {
|
| 69 |
"plain": "Full-page text extraction",
|
| 70 |
-
"layout": "Layout-aware OCR (region detection + per-region extraction)",
|
| 71 |
}
|
| 72 |
|
| 73 |
|
|
@@ -155,6 +149,7 @@ def main(
|
|
| 155 |
create_pr: bool = False,
|
| 156 |
compile: bool = True,
|
| 157 |
cudagraph: bool = True,
|
|
|
|
| 158 |
verbose: bool = False,
|
| 159 |
):
|
| 160 |
check_cuda_availability()
|
|
@@ -212,42 +207,30 @@ def main(
|
|
| 212 |
logger.info(f"Processing {len(dataset)} images...")
|
| 213 |
all_outputs = []
|
| 214 |
|
| 215 |
-
|
| 216 |
-
|
| 217 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 218 |
try:
|
| 219 |
-
|
| 220 |
-
results = engine.generate_with_layout(images=[pil_image], use_tqdm=False)
|
| 221 |
-
regions = results[0] if results else []
|
| 222 |
-
all_outputs.append(json.dumps(regions, ensure_ascii=False))
|
| 223 |
except Exception as e:
|
| 224 |
-
logger.error(f"Error
|
| 225 |
-
|
| 226 |
-
else:
|
| 227 |
-
# Batch plain OCR for better throughput
|
| 228 |
-
batch_size = 8
|
| 229 |
-
for batch_start in tqdm(
|
| 230 |
-
range(0, len(dataset), batch_size), desc="Falcon OCR (plain)"
|
| 231 |
-
):
|
| 232 |
-
batch_end = min(batch_start + batch_size, len(dataset))
|
| 233 |
-
batch_images = []
|
| 234 |
-
for i in range(batch_start, batch_end):
|
| 235 |
-
try:
|
| 236 |
-
batch_images.append(prepare_image(dataset[i][image_column]))
|
| 237 |
-
except Exception as e:
|
| 238 |
-
logger.error(f"Error preparing image {i}: {e}")
|
| 239 |
-
batch_images.append(Image.new("RGB", (100, 100)))
|
| 240 |
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
|
| 252 |
# Calculate processing time
|
| 253 |
processing_duration = datetime.now() - start_time
|
|
@@ -419,6 +402,10 @@ if __name__ == "__main__":
|
|
| 419 |
"--no-cudagraph", action="store_true",
|
| 420 |
help="Disable CUDA graph capture",
|
| 421 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 422 |
parser.add_argument(
|
| 423 |
"--verbose", action="store_true", help="Log resolved package versions",
|
| 424 |
)
|
|
@@ -441,5 +428,6 @@ if __name__ == "__main__":
|
|
| 441 |
create_pr=args.create_pr,
|
| 442 |
compile=not args.no_compile,
|
| 443 |
cudagraph=not args.no_cudagraph,
|
|
|
|
| 444 |
verbose=args.verbose,
|
| 445 |
)
|
|
|
|
| 6 |
# "pillow",
|
| 7 |
# "torch>=2.5",
|
| 8 |
# "torchvision",
|
| 9 |
+
# "falcon-perception",
|
|
|
|
| 10 |
# ]
|
| 11 |
# ///
|
| 12 |
|
|
|
|
| 30 |
# Basic text OCR
|
| 31 |
uv run falcon-ocr.py input-dataset output-dataset
|
| 32 |
|
|
|
|
|
|
|
|
|
|
| 33 |
# Test with small sample
|
| 34 |
uv run falcon-ocr.py dataset test --max-samples 5 --shuffle
|
| 35 |
|
|
|
|
| 54 |
from datasets import load_dataset
|
| 55 |
from huggingface_hub import DatasetCard, login
|
| 56 |
from PIL import Image
|
|
|
|
| 57 |
|
| 58 |
logging.basicConfig(level=logging.INFO)
|
| 59 |
logger = logging.getLogger(__name__)
|
|
|
|
| 62 |
|
| 63 |
TASK_MODES = {
|
| 64 |
"plain": "Full-page text extraction",
|
|
|
|
| 65 |
}
|
| 66 |
|
| 67 |
|
|
|
|
| 149 |
create_pr: bool = False,
|
| 150 |
compile: bool = True,
|
| 151 |
cudagraph: bool = True,
|
| 152 |
+
progress: bool = False,
|
| 153 |
verbose: bool = False,
|
| 154 |
):
|
| 155 |
check_cuda_availability()
|
|
|
|
| 207 |
logger.info(f"Processing {len(dataset)} images...")
|
| 208 |
all_outputs = []
|
| 209 |
|
| 210 |
+
# Batch plain OCR for better throughput
|
| 211 |
+
batch_size = 8
|
| 212 |
+
total_batches = (len(dataset) + batch_size - 1) // batch_size
|
| 213 |
+
for batch_idx, batch_start in enumerate(range(0, len(dataset), batch_size), 1):
|
| 214 |
+
batch_end = min(batch_start + batch_size, len(dataset))
|
| 215 |
+
logger.info(f"Batch {batch_idx}/{total_batches} ({batch_start}/{len(dataset)} done)")
|
| 216 |
+
batch_images = []
|
| 217 |
+
for i in range(batch_start, batch_end):
|
| 218 |
try:
|
| 219 |
+
batch_images.append(prepare_image(dataset[i][image_column]))
|
|
|
|
|
|
|
|
|
|
| 220 |
except Exception as e:
|
| 221 |
+
logger.error(f"Error preparing image {i}: {e}")
|
| 222 |
+
batch_images.append(Image.new("RGB", (100, 100)))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 223 |
|
| 224 |
+
try:
|
| 225 |
+
texts = engine.generate_plain(
|
| 226 |
+
images=batch_images, use_tqdm=progress
|
| 227 |
+
)
|
| 228 |
+
all_outputs.extend(texts)
|
| 229 |
+
except Exception as e:
|
| 230 |
+
logger.error(f"Error processing batch {batch_start}-{batch_end}: {e}")
|
| 231 |
+
all_outputs.extend(
|
| 232 |
+
[f"[OCR ERROR: {str(e)[:200]}]"] * len(batch_images)
|
| 233 |
+
)
|
| 234 |
|
| 235 |
# Calculate processing time
|
| 236 |
processing_duration = datetime.now() - start_time
|
|
|
|
| 402 |
"--no-cudagraph", action="store_true",
|
| 403 |
help="Disable CUDA graph capture",
|
| 404 |
)
|
| 405 |
+
parser.add_argument(
|
| 406 |
+
"--progress", action="store_true",
|
| 407 |
+
help="Show per-image progress bar from the inference engine",
|
| 408 |
+
)
|
| 409 |
parser.add_argument(
|
| 410 |
"--verbose", action="store_true", help="Log resolved package versions",
|
| 411 |
)
|
|
|
|
| 428 |
create_pr=args.create_pr,
|
| 429 |
compile=not args.no_compile,
|
| 430 |
cudagraph=not args.no_cudagraph,
|
| 431 |
+
progress=args.progress,
|
| 432 |
verbose=args.verbose,
|
| 433 |
)
|