Size: 1M<n<10M
Tags: Document_Understanding, Document_Packet_Splitting, Document_Comprehension, Document_Classification, Document_Recognition, Document_Segmentation
Release dataset generator
- notebooks/01_create_assets.ipynb +131 -0
- notebooks/02_create_benchmarks.ipynb +226 -0
- notebooks/03_analyze_benchmarks.ipynb +321 -0
- src/assets/__init__.py +2 -0
- src/assets/models.py +14 -0
- src/assets/run.py +123 -0
- src/assets/services/__init__.py +2 -0
- src/assets/services/asset_creator.py +114 -0
- src/assets/services/asset_writer.py +63 -0
- src/assets/services/deepseek_ocr.py +151 -0
- src/assets/services/pdf_loader.py +131 -0
- src/assets/services/textract_ocr.py +51 -0
- src/benchmarks/__init__.py +2 -0
- src/benchmarks/models.py +61 -0
- src/benchmarks/run.py +148 -0
- src/benchmarks/services/__init__.py +2 -0
- src/benchmarks/services/asset_loader.py +117 -0
- src/benchmarks/services/benchmark_generator.py +77 -0
- src/benchmarks/services/benchmark_writer.py +177 -0
- src/benchmarks/services/shuffle_strategies/__init__.py +25 -0
- src/benchmarks/services/shuffle_strategies/base_strategy.py +90 -0
- src/benchmarks/services/shuffle_strategies/mono_rand.py +89 -0
- src/benchmarks/services/shuffle_strategies/mono_seq.py +93 -0
- src/benchmarks/services/shuffle_strategies/poly_int.py +111 -0
- src/benchmarks/services/shuffle_strategies/poly_rand.py +111 -0
- src/benchmarks/services/shuffle_strategies/poly_seq.py +94 -0
- src/benchmarks/services/split_manager.py +110 -0
notebooks/01_create_assets.ipynb
ADDED
@@ -0,0 +1,131 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Step 1: Create Assets from Raw PDFs\n",
    "\n",
    "This notebook processes raw PDFs and creates structured assets:\n",
    "- Page images (PNG at 300 DPI)\n",
    "- OCR text (AWS Textract)\n",
    "\n",
    "**Output**: Structured assets for benchmark creation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "sys.path.append(\"../src/assets\")\n",
    "\n",
    "from services.pdf_loader import PdfLoader\n",
    "from services.textract_ocr import TextractOcr\n",
    "from services.asset_writer import AssetWriter\n",
    "from services.asset_creator import AssetCreator"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Configuration"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "RAW_DATA_PATH = '../data/raw_pdfs'\n",
    "OUTPUT_PATH = '../data/assets'\n",
    "WORKERS = 10\n",
    "LIMIT = None # Set to number to limit processing"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load PDFs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "loader = PdfLoader(raw_data_path=RAW_DATA_PATH)\n",
    "documents = loader.get_all_documents()\n",
    "\n",
    "print(f\"Loaded {len(documents)} documents\")\n",
    "print(f\"Sample: {documents[0]}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Create Assets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ocr = TextractOcr()\n",
    "writer = AssetWriter(output_base_path=OUTPUT_PATH)\n",
    "creator = AssetCreator(writer, ocr)\n",
    "\n",
    "results = creator.create_all(\n",
    "    documents=documents,\n",
    "    workers=WORKERS,\n",
    "    limit=LIMIT\n",
    ")\n",
    "\n",
    "print(f\"\\nResults: {results}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Save Document Mapping (Optional)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "loader.save_document_mapping(\n",
    "    documents,\n",
    "    output_path='../data/document_mapping.csv'\n",
    ")\n",
    "\n",
    "print(\"Document mapping saved!\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.8.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
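A note on running the Create Assets cell: the notebook constructs `TextractOcr()` with no arguments, while `src/assets/services/textract_ocr.py` below requires an `s3_bucket`, so the cell as committed will likely raise a TypeError. A minimal sketch of the same step with the bucket passed explicitly; the bucket name is a placeholder, not a value from this commit:

    import sys
    sys.path.append("../src/assets")

    from services.pdf_loader import PdfLoader
    from services.textract_ocr import TextractOcr
    from services.asset_writer import AssetWriter
    from services.asset_creator import AssetCreator

    S3_BUCKET = "my-textract-temp-bucket"  # placeholder, e.g. os.environ["DOCSPLIT_S3_BUCKET"]

    loader = PdfLoader(raw_data_path="../data/raw_pdfs")
    documents = loader.get_all_documents()

    ocr = TextractOcr(s3_bucket=S3_BUCKET)  # s3_prefix defaults to 'textract-temp'
    writer = AssetWriter(output_base_path="../data/assets")
    creator = AssetCreator(writer, ocr)

    # Process a small sample first; drop limit for the full run
    results = creator.create_all(documents=documents, workers=10, limit=5)
    print(results)  # {'successful': ..., 'failed': ..., 'failed_docs': [...]}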
notebooks/02_create_benchmarks.ipynb
ADDED
@@ -0,0 +1,226 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Step 2: Create Benchmarks from Assets\n",
    "\n",
    "This notebook generates document splitting benchmarks using all strategies.\n",
    "\n",
    "**Prerequisites**: Assets created from notebook 01\n",
    "\n",
    "**Output**: Benchmark datasets with ground truth for train/test/validation splits"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "sys.path.append('../src/benchmarks')\n",
    "\n",
    "from services.asset_loader import AssetLoader\n",
    "from services.split_manager import SplitManager\n",
    "from services.benchmark_generator import BenchmarkGenerator\n",
    "from services.benchmark_writer import BenchmarkWriter\n",
    "from services.shuffle_strategies import get_strategy, STRATEGIES"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Configuration"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ASSETS_PATH = '../data/assets'\n",
    "OUTPUT_PATH = '../data/benchmarks'\n",
    "SPLIT_MAPPING_PATH = '../data/metadata/split_mapping.json'\n",
    "\n",
    "# Number of spliced documents per split\n",
    "NUM_TRAIN = 800\n",
    "NUM_TEST = 200\n",
    "NUM_VAL = 500\n",
    "\n",
    "# Size: small (5-20 pages) or large (20-500 pages)\n",
    "SIZE = 'small'\n",
    "MIN_PAGES = 5 if SIZE == 'small' else 20\n",
    "MAX_PAGES = 20 if SIZE == 'small' else 500\n",
    "\n",
    "RANDOM_SEED = 42\n",
    "\n",
    "print(f\"Configuration:\")\n",
    "print(f\"  Size: {SIZE} ({MIN_PAGES}-{MAX_PAGES} pages)\")\n",
    "print(f\"  Train: {NUM_TRAIN}, Test: {NUM_TEST}, Val: {NUM_VAL}\")\n",
    "print(f\"  Strategies: {list(STRATEGIES.keys())}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load Assets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "loader = AssetLoader(assets_path=ASSETS_PATH)\n",
    "documents_by_type = loader.load_all_documents()\n",
    "\n",
    "print(f\"Loaded {loader.total_documents} documents across {len(documents_by_type)} types\")\n",
    "for doc_type, docs in documents_by_type.items():\n",
    "    print(f\"  {doc_type}: {len(docs)} documents\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Create Train/Test/Validation Split"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "split_manager = SplitManager(random_seed=RANDOM_SEED)\n",
    "splits = split_manager.create_split(documents_by_type)\n",
    "\n",
    "# Save split mapping\n",
    "split_manager.save_split(splits, SPLIT_MAPPING_PATH)\n",
    "\n",
    "print(f\"\\nSplit statistics:\")\n",
    "for split_name in ['train', 'test', 'validation']:\n",
    "    total = sum(len(docs) for docs in splits[split_name].values())\n",
    "    print(f\"  {split_name}: {total} documents\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Generate All Benchmarks"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "all_strategies = ['mono_seq', 'mono_rand', 'poly_seq', 'poly_int', 'poly_rand']\n",
    "\n",
    "for strategy_name in all_strategies:\n",
    "    print(f\"\\n{'='*60}\")\n",
    "    print(f\"Processing strategy: {strategy_name}\")\n",
    "    print(f\"{'='*60}\")\n",
    "    \n",
    "    # Initialize strategy\n",
    "    strategy = get_strategy(\n",
    "        strategy_name,\n",
    "        min_pages=MIN_PAGES,\n",
    "        max_pages=MAX_PAGES,\n",
    "        random_seed=RANDOM_SEED\n",
    "    )\n",
    "    \n",
    "    generator = BenchmarkGenerator(strategy=strategy)\n",
    "    output_path = f'{OUTPUT_PATH}/{strategy_name}/{SIZE}'\n",
    "    writer = BenchmarkWriter(output_base_path=output_path, assets_path=ASSETS_PATH)\n",
    "    \n",
    "    # Generate train\n",
    "    print(f\"Generating train...\")\n",
    "    train_benchmark = generator.generate_for_split(\n",
    "        documents_by_type=documents_by_type,\n",
    "        doc_names_for_split=splits['train'],\n",
    "        num_spliced_docs=NUM_TRAIN,\n",
    "        split_name='train',\n",
    "        benchmark_name=strategy_name\n",
    "    )\n",
    "    writer.save_benchmark_set(train_benchmark, 'train')\n",
    "    \n",
    "    # Generate test\n",
    "    print(f\"Generating test...\")\n",
    "    test_benchmark = generator.generate_for_split(\n",
    "        documents_by_type=documents_by_type,\n",
    "        doc_names_for_split=splits['test'],\n",
    "        num_spliced_docs=NUM_TEST,\n",
    "        split_name='test',\n",
    "        benchmark_name=strategy_name\n",
    "    )\n",
    "    writer.save_benchmark_set(test_benchmark, 'test')\n",
    "    \n",
    "    # Generate validation\n",
    "    print(f\"Generating validation...\")\n",
    "    val_benchmark = generator.generate_for_split(\n",
    "        documents_by_type=documents_by_type,\n",
    "        doc_names_for_split=splits['validation'],\n",
    "        num_spliced_docs=NUM_VAL,\n",
    "        split_name='validation',\n",
    "        benchmark_name=strategy_name\n",
    "    )\n",
    "    writer.save_benchmark_set(val_benchmark, 'validation')\n",
    "    \n",
    "    print(f\"✅ Completed {strategy_name}\")\n",
    "\n",
    "print(f\"\\n{'='*60}\")\n",
    "print(f\"✅ All benchmarks generated!\")\n",
    "print(f\"{'='*60}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Summary"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(f\"\\n✅ Benchmark creation complete!\")\n",
    "print(f\"\\nOutput structure:\")\n",
    "for strategy_name in all_strategies:\n",
    "    print(f\"\\n{OUTPUT_PATH}/{strategy_name}/{SIZE}/\")\n",
    "    print(f\"  ├── train.csv ({NUM_TRAIN} documents)\")\n",
    "    print(f\"  ├── test.csv ({NUM_TEST} documents)\")\n",
    "    print(f\"  ├── validation.csv ({NUM_VAL} documents)\")\n",
    "    print(f\"  └── ground_truth_json/\")\n",
    "    print(f\"      ├── train/ (JSON per document)\")\n",
    "    print(f\"      ├── test/ (JSON per document)\")\n",
    "    print(f\"      └── validation/ (JSON per document)\")\n",
    "print(f\"\\nSplit mapping: {SPLIT_MAPPING_PATH}\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.8.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
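The split-statistics cell iterates `splits[split_name].values()` as per-type lists of documents, which suggests `split_mapping.json` is a nested mapping of split → document type → document names. A small sketch for re-loading and summarizing that file after the notebook has run; the exact JSON layout is an inference from that cell, not something this commit states:

    import json
    from pathlib import Path

    # Assumed layout: {"train": {"<doc_type>": ["<doc_name>", ...], ...}, "test": {...}, "validation": {...}}
    split_mapping_path = Path("../data/metadata/split_mapping.json")
    splits = json.loads(split_mapping_path.read_text())

    for split_name, docs_by_type in splits.items():
        total = sum(len(names) for names in docs_by_type.values())
        print(f"{split_name}: {total} documents across {len(docs_by_type)} types")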
notebooks/03_analyze_benchmarks.ipynb
ADDED
@@ -0,0 +1,321 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Step 3: Analyze Benchmarks\n",
    "\n",
    "This notebook analyzes generated benchmarks:\n",
    "- Load and inspect benchmark CSV and ground truth JSON\n",
    "- Visualize statistics\n",
    "- Validate ground truth consistency\n",
    "\n",
    "**Prerequisites**: Benchmarks created from notebook 02"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import os\n",
    "from PIL import Image\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from pathlib import Path\n",
    "from collections import Counter\n",
    "from datasets import load_dataset"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Configuration"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "STRATEGY = 'poly_seq' # mono_seq, mono_rand, poly_seq, poly_int, poly_rand\n",
    "SIZE = 'small' # small or large\n",
    "SPLIT = 'train' # train, test, or validation\n",
    "\n",
    "BENCHMARK_PATH = f'../data/benchmarks/{STRATEGY}/{SIZE}'\n",
    "CSV_FILE = f'{BENCHMARK_PATH}/{SPLIT}.csv'\n",
    "JSON_DIR = f'{BENCHMARK_PATH}/ground_truth_json/{SPLIT}'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load CSV Benchmark"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load dataset using HuggingFace datasets library\n",
    "dataset = load_dataset('csv', data_files=CSV_FILE)['train']\n",
    "\n",
    "print(f\"Loaded {len(dataset)} rows from {CSV_FILE}\")\n",
    "print(f\"\\nColumns: {dataset.column_names}\")\n",
    "print(\"\\nFirst 5 rows:\")\n",
    "# Display first 5 rows\n",
    "for i in range(min(5, len(dataset))):\n",
    "    print(dataset[i])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Basic Statistics"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Count unique parent documents\n",
    "unique_parents = set(dataset['parent_doc_name'])\n",
    "num_parent_docs = len(unique_parents)\n",
    "total_pages = len(dataset)\n",
    "unique_doc_types = set(dataset['doc_type'])\n",
    "num_doc_types = len(unique_doc_types)\n",
    "\n",
    "print(\"📊 Benchmark Statistics\")\n",
    "print(\"=\"*50)\n",
    "print(f\"Strategy: {STRATEGY}\")\n",
    "print(f\"Size: {SIZE}\")\n",
    "print(f\"Split: {SPLIT}\")\n",
    "print(f\"\\nTotal parent documents: {num_parent_docs}\")\n",
    "print(f\"Total pages: {total_pages}\")\n",
    "print(f\"Unique document types: {num_doc_types}\")\n",
    "print(f\"Average pages per document: {total_pages / num_parent_docs:.2f}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Pages per Parent Document"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Group by parent_doc_name and count pages\n",
    "\n",
    "pages_per_doc = Counter(dataset['parent_doc_name'])\n",
    "pages_counts = list(pages_per_doc.values())\n",
    "\n",
    "plt.figure(figsize=(10, 6))\n",
    "plt.hist(pages_counts, bins=30, edgecolor='black')\n",
    "plt.xlabel('Total Pages')\n",
    "plt.ylabel('Frequency')\n",
    "plt.title(f'Distribution of Pages per Parent Document ({STRATEGY}/{SIZE}/{SPLIT})')\n",
    "plt.grid(axis='y', alpha=0.3)\n",
    "plt.show()\n",
    "\n",
    "print(\"\\nPages per document statistics:\")\n",
    "\n",
    "print(f\"count {len(pages_counts)}\")\n",
    "print(f\"mean {np.mean(pages_counts):.6f}\")\n",
    "print(f\"std {np.std(pages_counts):.6f}\")\n",
    "print(f\"min {np.min(pages_counts):.6f}\")\n",
    "print(f\"25% {np.percentile(pages_counts, 25):.6f}\")\n",
    "print(f\"50% {np.percentile(pages_counts, 50):.6f}\")\n",
    "print(f\"75% {np.percentile(pages_counts, 75):.6f}\")\n",
    "print(f\"max {np.max(pages_counts):.6f}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Document Type Distribution"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Count document types\n",
    "type_counts = Counter(dataset['doc_type'])\n",
    "\n",
    "plt.figure(figsize=(12, 6))\n",
    "plt.bar(type_counts.keys(), type_counts.values(), edgecolor='black')\n",
    "plt.xlabel('Document Type')\n",
    "plt.ylabel('Number of Pages')\n",
    "plt.title(f'Document Type Distribution ({STRATEGY}/{SIZE}/{SPLIT})')\n",
    "plt.xticks(rotation=45, ha='right')\n",
    "plt.grid(axis='y', alpha=0.3)\n",
    "plt.tight_layout()\n",
    "plt.show()\n",
    "\n",
    "print(\"\\nDocument type page counts:\")\n",
    "for doc_type, count in sorted(type_counts.items(), key=lambda x: x[1], reverse=True):\n",
    "    print(f\"  {doc_type}: {count} pages\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Inspect Sample Parent Document"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Get first parent document\n",
    "sample_parent = dataset['parent_doc_name'][0]\n",
    "# Filter dataset for this parent\n",
    "sample_indices = [i for i, parent in enumerate(dataset['parent_doc_name']) if parent == sample_parent]\n",
    "sample_data = dataset.select(sample_indices)\n",
    "\n",
    "print(f\"Parent Document: {sample_parent}\")\n",
    "print(f\"Total Pages: {len(sample_data)}\")\n",
    "print(\"\\nSubdocuments (by group_id):\")\n",
    "unique_groups = set(sample_data['group_id'])\n",
    "for group_id in sorted(unique_groups):\n",
    "    group_indices = [i for i, g in enumerate(sample_data['group_id']) if g == group_id]\n",
    "    group_data = sample_data.select(group_indices)\n",
    "    doc_type = group_data['doc_type'][0]\n",
    "    original_doc = group_data['original_doc_name'][0]\n",
    "    pages = group_data['page']\n",
    "    print(f\"  {group_id}: {doc_type}/{original_doc} - Pages: {pages[:5]}{'...' if len(pages) > 5 else ''}\")\n",
    "\n",
    "print(\"\\nFirst 10 pages:\")\n",
    "for i in range(min(10, len(sample_data))):\n",
    "    row = sample_data[i]\n",
    "    print(f\"  Page {row['page']}: {row['doc_type']}/{row['original_doc_name']} (group: {row['group_id']}, ordinal: {row['local_doc_id_page_ordinal']})\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## View Sample Images"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Display all pages vertically (scrollable)\n",
    "for i in range(len(sample_data)):\n",
    "    row = sample_data[i]\n",
    "    img_path = f\"../{row['image_path']}\"\n",
    "    \n",
    "    if os.path.exists(img_path):\n",
    "        img = Image.open(img_path)\n",
    "        \n",
    "        plt.figure(figsize=(8, 10))\n",
    "        plt.imshow(img)\n",
    "        plt.title(f\"Page {row['page']} | {row['doc_type']} | Group: {row['group_id']}\", fontsize=12)\n",
    "        plt.axis('off')\n",
    "        plt.tight_layout()\n",
    "        plt.show()\n",
    "        \n",
    "        print(f\"Page {row['page']}: {row['original_doc_name']}\")\n",
    "        print(f\"Group: {row['group_id']} | Ordinal: {row['local_doc_id_page_ordinal']}\")\n",
    "        print(\"-\" * 80)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load and Inspect Ground Truth JSON"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load ground truth JSON for sample parent document\n",
    "json_file = f'{JSON_DIR}/{sample_parent}.json'\n",
    "\n",
    "with open(json_file, 'r') as f:\n",
    "    gt_json = json.load(f)\n",
    "\n",
    "print(f\"Ground Truth JSON for: {gt_json['doc_id']}\")\n",
    "print(f\"Total Pages: {gt_json['total_pages']}\")\n",
    "print(f\"Number of Subdocuments: {len(gt_json['subdocuments'])}\")\n",
    "print(f\"\\nFull JSON structure:\")\n",
    "print(json.dumps(gt_json, indent=2))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Summary"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(\"\\n📊 Final Summary\")\n",
    "print(\"=\"*50)\n",
    "print(f\"Strategy: {STRATEGY}\")\n",
    "print(f\"Size: {SIZE}\")\n",
    "print(f\"Split: {SPLIT}\")\n",
    "print(f\"\\nParent documents: {num_parent_docs}\")\n",
    "print(f\"Total pages: {total_pages}\")\n",
    "print(f\"Unique document types: {num_doc_types}\")\n",
    "print(f\"\\nAverage pages per document: {total_pages / num_parent_docs:.2f}\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "idplabs",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
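The intro cell lists "Validate ground truth consistency" but no cell in this notebook implements it. A minimal sketch of such a check, using only the CSV columns and JSON keys the notebook itself touches (parent_doc_name, group_id; total_pages, subdocuments); the paths mirror the notebook's configuration, and the assumption that one subdocument entry corresponds to one group_id is mine, not confirmed by this commit:

    import json
    from collections import Counter
    from datasets import load_dataset

    CSV_FILE = "../data/benchmarks/poly_seq/small/train.csv"                # same values the notebook builds
    JSON_DIR = "../data/benchmarks/poly_seq/small/ground_truth_json/train"

    dataset = load_dataset("csv", data_files=CSV_FILE)["train"]
    pages_per_parent = Counter(dataset["parent_doc_name"])
    groups_per_parent = {}
    for parent, group in zip(dataset["parent_doc_name"], dataset["group_id"]):
        groups_per_parent.setdefault(parent, set()).add(group)

    for parent, n_pages in pages_per_parent.items():
        with open(f"{JSON_DIR}/{parent}.json", "r") as f:
            gt = json.load(f)
        # total_pages should match the number of CSV rows for this parent document
        assert gt["total_pages"] == n_pages, f"{parent}: page count mismatch"
        # assumed: one subdocument per group_id
        assert len(gt["subdocuments"]) == len(groups_per_parent[parent]), f"{parent}: subdocument count mismatch"

    print(f"Checked {len(pages_per_parent)} parent documents")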
src/assets/__init__.py
ADDED
@@ -0,0 +1,2 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0
src/assets/models.py
ADDED
@@ -0,0 +1,14 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0

from pydantic import BaseModel, Field


class Document(BaseModel):
    """Represents a document in the dataset."""

    doc_type: str = Field(..., description="Document category/type")
    doc_name: str = Field(..., description="Unique document identifier")
    filename: str = Field(..., description="PDF filename")
    absolute_filepath: str = Field(..., description="Full path to PDF file")
    page_count: int = Field(..., gt=0, description="Number of pages in document")
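Because Document is a pydantic model with page_count constrained to gt=0, construction doubles as validation. A short sketch with illustrative field values:

    from pydantic import ValidationError
    from models import Document  # resolves when src/assets is on sys.path, as in the notebooks

    doc = Document(
        doc_type="letter",  # illustrative values, not from this commit
        doc_name="letter_0001",
        filename="letter_0001.pdf",
        absolute_filepath="/data/raw_pdfs/letter/letter_0001.pdf",
        page_count=3,
    )
    print(doc.doc_type, doc.page_count)

    try:
        # page_count=0 violates the gt=0 constraint and raises
        Document(doc_type="letter", doc_name="bad", filename="bad.pdf",
                 absolute_filepath="/tmp/bad.pdf", page_count=0)
    except ValidationError as e:
        print(e)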
src/assets/run.py
ADDED
@@ -0,0 +1,123 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0

"""
Create Assets from Raw PDFs

Processes raw PDFs and creates structured assets:
- Page images (PNG at 300 DPI)
- OCR text (AWS Textract)

Usage:
    python run.py
    python run.py --workers 20 --limit 100
"""
import os
import argparse
from loguru import logger

from services.pdf_loader import PdfLoader
from services.textract_ocr import TextractOcr
from services.deepseek_ocr import DeepSeekOcr
from services.asset_writer import AssetWriter
from services.asset_creator import AssetCreator


def main():
    parser = argparse.ArgumentParser(description='Create assets from raw PDFs')
    parser.add_argument('--raw-data-path', default='../raw_data')
    parser.add_argument('--output-path', default='../processed_assets')
    parser.add_argument('--metadata-path', default='../metadata')
    parser.add_argument('--workers', type=int, default=10)
    parser.add_argument('--limit', type=int, default=None)
    parser.add_argument('--save-mapping', action='store_true')
    parser.add_argument('--use-deepseek-for-language', action='store_true', help='Use DeepSeek OCR for language docs (default: Textract)')
    parser.add_argument('--s3-bucket', default=os.getenv('DOCSPLIT_S3_BUCKET'), help='S3 bucket for Textract temporary uploads')
    parser.add_argument('--s3-prefix', default='textract-temp', help='S3 prefix for uploads')

    args = parser.parse_args()

    logger.info("Creating assets from PDFs")

    # Load all PDFs
    loader = PdfLoader(raw_data_path=args.raw_data_path)
    documents = loader.get_all_documents()

    successful_docs = []

    if args.use_deepseek_for_language:
        # Separate language documents for DeepSeek processing
        language_docs = [doc for doc in documents if doc.doc_type == 'language']
        other_docs = [doc for doc in documents if doc.doc_type != 'language']

        # Process non-language documents with Textract
        if other_docs:
            logger.info(f"Processing {len(other_docs)} non-language documents with Textract")
            ocr = TextractOcr(s3_bucket=args.s3_bucket, s3_prefix=args.s3_prefix)
            writer = AssetWriter(output_base_path=args.output_path)
            creator = AssetCreator(writer, ocr)

            results = creator.create_all(
                documents=other_docs,
                workers=args.workers,
                limit=args.limit
            )

            logger.info(f"Textract completed: {results}")
            if results['failed'] > 0:
                logger.warning(f"Textract failed: {results['failed_docs']}")

            failed_names = set(results['failed_docs'])
            successful_docs.extend([doc for doc in other_docs if doc.doc_name not in failed_names])

        # Process language documents with DeepSeek
        if language_docs:
            logger.info(f"Processing {len(language_docs)} language documents with DeepSeek OCR")
            try:
                deepseek_ocr = DeepSeekOcr()
                writer = AssetWriter(output_base_path=args.output_path)
                creator = AssetCreator(writer, deepseek_ocr)

                results = creator.create_all(
                    documents=language_docs,
                    workers=args.workers,
                    limit=args.limit
                )

                logger.info(f"DeepSeek completed: {results}")
                if results['failed'] > 0:
                    logger.warning(f"DeepSeek failed: {results['failed_docs']}")

                failed_names = set(results['failed_docs'])
                successful_docs.extend([doc for doc in language_docs if doc.doc_name not in failed_names])
            except Exception as e:
                logger.error(f"DeepSeek OCR initialization failed: {e}")
    else:
        # Process ALL documents with Textract
        logger.info(f"Processing {len(documents)} documents with Textract")
        ocr = TextractOcr(s3_bucket=args.s3_bucket, s3_prefix=args.s3_prefix)
        writer = AssetWriter(output_base_path=args.output_path)
        creator = AssetCreator(writer, ocr)

        results = creator.create_all(
            documents=documents,
            workers=args.workers,
            limit=args.limit
        )

        logger.info(f"Textract completed: {results}")
        if results['failed'] > 0:
            logger.warning(f"Textract failed: {results['failed_docs']}")

        failed_names = set(results['failed_docs'])
        successful_docs.extend([doc for doc in documents if doc.doc_name not in failed_names])

    # Save mapping AFTER processing with only successful documents
    if args.save_mapping:
        mapping_path = f"{args.metadata_path}/document_mapping.csv"
        loader.save_document_mapping(successful_docs, output_path=mapping_path)
        logger.info(f"Saved mapping for {len(successful_docs)} successful documents")


if __name__ == '__main__':
    main()
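run.py picks up the Textract bucket from --s3-bucket or the DOCSPLIT_S3_BUCKET environment variable, and its imports are relative to src/assets. A sketch of driving it programmatically; the bucket name and paths are placeholders:

    import os
    import subprocess

    env = dict(os.environ, DOCSPLIT_S3_BUCKET="my-textract-temp-bucket")  # placeholder bucket

    subprocess.run(
        [
            "python", "run.py",
            "--raw-data-path", "../raw_data",
            "--output-path", "../processed_assets",
            "--workers", "20",
            "--limit", "100",
            "--save-mapping",
        ],
        cwd="src/assets",  # so that `from services...` imports resolve
        env=env,
        check=True,
    )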
src/assets/services/__init__.py
ADDED
@@ -0,0 +1,2 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0
src/assets/services/asset_creator.py
ADDED
@@ -0,0 +1,114 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0

import os
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import List, Optional
from loguru import logger
from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type

from models import Document
from services.textract_ocr import TextractOcr
from services.deepseek_ocr import DeepSeekOcr
from services.asset_writer import AssetWriter


class AssetCreator:
    """Creates assets from PDFs: extracts images and OCR text."""

    def __init__(self, writer: AssetWriter, ocr):
        """Initialize AssetCreator.

        Args:
            writer: AssetWriter instance
            ocr: OCR service (TextractOcr or DeepSeekOcr)
        """
        self.writer = writer
        self.ocr = ocr
        self.is_deepseek = isinstance(ocr, DeepSeekOcr)

    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=4, max=10),
        retry=retry_if_exception_type((Exception,)),
        reraise=True,
    )
    def create_assets(self, doc: Document) -> None:
        """Create assets for a single document with retry logic."""
        try:
            logger.trace(f"Processing: {doc.doc_name}")

            # Read PDF
            with open(doc.absolute_filepath, 'rb') as f:
                pdf_bytes = f.read()

            # Extract text via OCR
            if self.is_deepseek:
                # DeepSeek needs images, extract them first
                from src.idp_labs.core.util.pdf_util import PdfUtil
                images = PdfUtil.get_pages_images_from_pdf(pdf_path=doc.absolute_filepath)
                text_pages = self.ocr.extract_text_from_images(images)
            else:
                # Textract uses PDF bytes
                text_pages = self.ocr.extract_text_from_pdf(pdf_bytes)

            # Write all assets
            self.writer.save_document_assets(
                doc_type=doc.doc_type,
                doc_name=doc.doc_name,
                filename=doc.filename,
                pdf_bytes=pdf_bytes,
                text_pages=text_pages
            )

            logger.trace(f"Completed: {doc.doc_name}")

        except Exception as e:
            logger.exception(f"Error processing {doc.doc_name}: {e}")
            raise

    def create_all(
        self,
        documents: List[Document],
        workers: Optional[int] = None,
        limit: Optional[int] = None
    ) -> dict:
        """Create assets for all documents with concurrent execution.

        Returns:
            Dict with 'successful', 'failed', and 'failed_docs' keys.
        """
        if workers is None:
            workers = os.cpu_count() or 1

        docs_to_process = documents[:limit] if limit and limit > 0 else documents

        logger.info(f"Processing {len(docs_to_process)} documents with {workers} workers")

        successful = 0
        failed = 0
        failed_docs = []

        with ThreadPoolExecutor(max_workers=workers) as executor:
            future_to_doc = {
                executor.submit(self.create_assets, doc): doc
                for doc in docs_to_process
            }

            for future in as_completed(future_to_doc):
                doc = future_to_doc[future]
                try:
                    future.result()
                    successful += 1
                except Exception as e:
                    failed += 1
                    failed_docs.append(doc.doc_name)
                    logger.error(f"Failed: {doc.doc_name} - {e}")

        logger.info(f"Completed: {successful} successful, {failed} failed")

        return {
            'successful': successful,
            'failed': failed,
            'failed_docs': failed_docs
        }
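AssetCreator only special-cases DeepSeekOcr via isinstance; any other object just needs an extract_text_from_pdf(pdf_bytes) method, which makes it easy to exercise the image-extraction and writing path offline. A minimal sketch with a stub OCR; the stub and paths are illustrative, not part of this commit:

    # Assumes src/assets is on sys.path, as in the notebooks.
    from services.pdf_loader import PdfLoader
    from services.asset_writer import AssetWriter
    from services.asset_creator import AssetCreator

    class StubOcr:
        """Returns empty text per page; lets the PDF/image pipeline run without AWS."""
        def extract_text_from_pdf(self, pdf_bytes: bytes):
            return [""]

    loader = PdfLoader(raw_data_path="../data/raw_pdfs")  # placeholder path
    creator = AssetCreator(AssetWriter(output_base_path="/tmp/assets"), StubOcr())
    print(creator.create_all(documents=loader.get_all_documents(), workers=4, limit=2))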
src/assets/services/asset_writer.py
ADDED
@@ -0,0 +1,63 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0

import os
from pathlib import Path
from typing import List
from PIL import Image
import fitz
from loguru import logger


class AssetWriter:
    """Writes document assets (PDF, images, OCR text) to disk."""

    def __init__(self, output_base_path: str = ".data/rvl-cdip-nmp-hf/rvl-cdip-nmp-assets"):
        self.output_base_path = Path(output_base_path)

    def save_document_assets(
        self,
        doc_type: str,
        doc_name: str,
        filename: str,
        pdf_bytes: bytes,
        text_pages: List[str]
    ):
        """Save all assets for a document: original PDF, page images, and OCR text."""

        # Create directory structure: {doc_type}/{filename}/
        doc_dir = self.output_base_path / doc_type / filename
        original_dir = doc_dir / "original"
        pages_dir = doc_dir / "pages"

        original_dir.mkdir(parents=True, exist_ok=True)

        # Save original PDF
        original_pdf_path = original_dir / filename
        with open(original_pdf_path, 'wb') as f:
            f.write(pdf_bytes)

        # Extract and save page images
        pdf_doc = fitz.open(stream=pdf_bytes, filetype="pdf")

        for page_num in range(len(pdf_doc)):
            page = pdf_doc[page_num]
            page_num_str = f"{page_num + 1:04d}"

            # Create page directory
            page_dir = pages_dir / page_num_str
            page_dir.mkdir(parents=True, exist_ok=True)

            # Save page image
            pix = page.get_pixmap(dpi=300)
            img_path = page_dir / f"page-{page_num_str}.png"
            pix.save(str(img_path))

            # Save OCR text
            if page_num < len(text_pages):
                text_path = page_dir / f"page-{page_num_str}-textract.md"
                with open(text_path, 'w', encoding='utf-8') as f:
                    f.write(text_pages[page_num])

        pdf_doc.close()
        logger.debug(f"Saved assets for {doc_type}/{doc_name}")
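save_document_assets lays files out as {doc_type}/{filename}/original/{filename} plus pages/NNNN/page-NNNN.png and page-NNNN-textract.md. A self-contained sketch that exercises the writer with a blank one-page PDF generated by PyMuPDF; the output path is arbitrary:

    import fitz  # PyMuPDF
    from pathlib import Path
    from services.asset_writer import AssetWriter

    # Build a one-page blank PDF in memory just to exercise the writer.
    pdf = fitz.open()
    pdf.new_page()
    pdf_bytes = pdf.tobytes()

    writer = AssetWriter(output_base_path="/tmp/assets_demo")
    writer.save_document_assets(
        doc_type="letter", doc_name="demo", filename="demo.pdf",
        pdf_bytes=pdf_bytes, text_pages=["(no text on this blank page)"],
    )

    for path in sorted(Path("/tmp/assets_demo").rglob("*")):
        print(path)
    # Expected: .../letter/demo.pdf/original/demo.pdf,
    #           .../letter/demo.pdf/pages/0001/page-0001.png,
    #           .../letter/demo.pdf/pages/0001/page-0001-textract.md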
src/assets/services/deepseek_ocr.py
ADDED
@@ -0,0 +1,151 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0

import os
import sys
import io
import tempfile
import shutil
import re
from typing import List, Optional
from contextlib import redirect_stdout
from loguru import logger
from PIL import Image

# Pin model revision for reproducibility and supply chain security
# "Update README.md" 9f30c71, committed on Nov 3, 2025
# False positive: the high-entropy string is actually the commit hash required to remediate B615
MODEL_REVISION = "9f30c71f441d010e5429c532364a86705536c53a"  # nosec SECRET-HEX-HIGH-ENTROPY-STRING


class DeepSeekOcr:
    """DeepSeek OCR for language documents."""

    def __init__(
        self,
        model_name: str = "deepseek-ai/DeepSeek-OCR",
        device: str = "cuda",
        cache_dir: Optional[str] = None
    ):
        """Initialize DeepSeek OCR from Hugging Face.

        Args:
            model_name: Hugging Face model name
            device: Device to run model on ('cuda' or 'cpu')
            cache_dir: Optional cache directory for model downloads (use larger disk if needed)
        """
        try:
            from transformers import AutoModel, AutoTokenizer
            import torch

            # Verify CUDA availability
            if device == "cuda" and not torch.cuda.is_available():
                logger.warning("CUDA requested but not available. Falling back to CPU. Performance will be significantly slower.")
                device = "cpu"

            logger.info(f"Loading DeepSeek model: {model_name} on {device}")

            self.tokenizer = AutoTokenizer.from_pretrained(  # nosec B615 - False positive: MODEL_REVISION pins a specific version hash
                model_name,
                trust_remote_code=True,
                cache_dir=cache_dir,
                revision=MODEL_REVISION
            )

            self.model = AutoModel.from_pretrained(  # nosec B615 - False positive: MODEL_REVISION pins a specific version hash
                model_name,
                _attn_implementation='flash_attention_2',
                trust_remote_code=True,
                use_safetensors=True,
                torch_dtype=torch.bfloat16,
                cache_dir=cache_dir,
                revision=MODEL_REVISION
            )

            self.model = self.model.eval()
            if device == "cuda":
                self.model = self.model.cuda()

            self.device = device
            logger.info(f"DeepSeek model loaded successfully on {device}")
        except ImportError as e:
            logger.error(f"Failed to import dependencies: {e}")
            raise
        except Exception as e:
            logger.error(f"Failed to load DeepSeek model: {e}")
            raise

    def extract_text_from_images(self, images: List[Image.Image]) -> List[str]:
        """Extract text from page images using DeepSeek OCR.

        Args:
            images: List of PIL Images

        Returns:
            List of markdown text per page
        """
        texts = []
        temp_dir = tempfile.mkdtemp(prefix='deepseek_ocr_')

        try:
            for idx, image in enumerate(images):
                if not isinstance(image, Image.Image):
                    logger.warning(f"Page {idx + 1} is not a valid PIL Image, skipping")
                    texts.append("")
                    continue

                try:
                    # Suppress model debug output
                    with redirect_stdout(io.StringIO()):
                        self.model.infer(
                            self.tokenizer,
                            prompt="<image>\n<|grounding|>Convert the document to markdown.",
                            image_file=image,
                            output_path=temp_dir,
                            base_size=1024,
                            image_size=640,
                            crop_mode=True,
                            save_results=True
                        )

                    # Read result from saved file
                    result_file = os.path.join(temp_dir, 'result.mmd')
                    if os.path.exists(result_file):
                        with open(result_file, 'r', encoding='utf-8') as f:
                            result = f.read()

                        # Clean markup tags
                        clean_text = re.sub(r'<\|ref\|>text<\|/ref\|>', '', result)
                        clean_text = re.sub(r'<\|det\|>\[\[.*?\]\]<\|/det\|>', '', clean_text)
                        clean_text = clean_text.strip()

                        texts.append(clean_text)

                        # Delete all output files after reading
                        for item in os.listdir(temp_dir):
                            item_path = os.path.join(temp_dir, item)
                            try:
                                if os.path.isfile(item_path):
                                    os.remove(item_path)
                                elif os.path.isdir(item_path):
                                    shutil.rmtree(item_path)
                            except Exception as e:
                                logger.debug(f"Failed to clean up {item_path}: {e}")

                        logger.info(f"DeepSeek OCR completed for page {idx + 1}")
                    else:
                        logger.warning(f"No result file found for page {idx + 1}")
                        texts.append("")

                except Exception as e:
                    logger.error(f"DeepSeek OCR error on page {idx + 1}: {e}")
                    texts.append("")
        finally:
            # Cleanup temp directory
            if os.path.exists(temp_dir):
                shutil.rmtree(temp_dir, ignore_errors=True)
                logger.debug(f"Cleaned up temp directory: {temp_dir}")

        return texts
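DeepSeekOcr downloads the pinned model revision from Hugging Face and is intended for a CUDA GPU with flash-attention available. A rough usage sketch over page images already extracted to disk; the cache directory and document path are placeholders, and runtime behaviour depends on the DeepSeek-OCR remote code:

    from pathlib import Path
    from PIL import Image
    from services.deepseek_ocr import DeepSeekOcr

    ocr = DeepSeekOcr(cache_dir="/mnt/models")  # placeholder cache dir; needs a CUDA GPU in practice
    pages_dir = Path("../data/assets/language/sample.pdf/pages")  # placeholder document
    images = [Image.open(p) for p in sorted(pages_dir.rglob("*.png"))]

    for i, text in enumerate(ocr.extract_text_from_images(images), start=1):
        print(f"--- page {i} ---")
        print(text[:200])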
src/assets/services/pdf_loader.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
| 2 |
+
# SPDX-License-Identifier: CC-BY-NC-4.0
|
| 3 |
+
|
| 4 |
+
import os
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
from typing import List, Optional
|
| 7 |
+
import csv
|
| 8 |
+
from loguru import logger
|
| 9 |
+
import fitz
|
| 10 |
+
|
| 11 |
+
from models import Document
|
| 12 |
+
|
class PdfLoader:
    """Loads PDF files from raw data directory."""

    def __init__(self, raw_data_path: str = ".data/rvl-cdip-mp/raw_data", max_file_size_mb: float = 150):
        """Initialize PDF loader.

        Args:
            raw_data_path: Path to raw data directory
            max_file_size_mb: Maximum file size in MB for valid PDFs
        """
        self.raw_data_path = Path(raw_data_path)
        self.max_file_size_mb = max_file_size_mb

    def validate_pdf(self, file_path: Path) -> tuple[bool, str, int]:
        """Validate if a PDF is readable and within size limits.

        Args:
            file_path: Path to PDF file

        Returns:
            Tuple of (is_valid, status_message, page_count)
        """
        try:
            # Check file size
            size_bytes = file_path.stat().st_size
            size_mb = size_bytes / (1024 * 1024)

            if size_bytes == 0:
                return False, "ZERO_SIZE", 0

            if size_mb > self.max_file_size_mb:
                return False, f"TOO_LARGE ({size_mb:.1f}MB)", 0

            # Check PDF header
            with open(file_path, "rb") as f:
                header = f.read(4)
                if header != b"%PDF":
                    return False, "INVALID_PDF_HEADER", 0

            # Test readability with PyMuPDF
            doc = fitz.open(str(file_path))

            if doc.is_encrypted:
                doc.close()
                return False, "ENCRYPTED_PDF", 0

            page_count = doc.page_count
            if page_count == 0:
                doc.close()
                return False, "NO_PAGES", 0

            # Try to access first page
            page = doc[0]
            page.get_text()
            doc.close()

            return True, "VALID", page_count

        except Exception as e:
            return False, f"ERROR: {str(e)[:50]}", 0

    def get_all_documents(self, exclude_types: Optional[List[str]] = None) -> List[Document]:
        """Get all valid PDF documents from raw data directory.

        Args:
            exclude_types: List of document types to exclude (e.g., ['language'])
        """
        if exclude_types is None:
            exclude_types = []

        documents = []

        for doc_type_dir in self.raw_data_path.iterdir():
            if not doc_type_dir.is_dir():
                continue

            doc_type = doc_type_dir.name

            if doc_type in exclude_types:
                logger.info(f"Skipping excluded type: {doc_type}")
                continue

            for pdf_file in doc_type_dir.glob("*.pdf"):
                is_valid, status, page_count = self.validate_pdf(pdf_file)

                if not is_valid:
                    logger.warning(f"Skipping invalid PDF {pdf_file}: {status}")
                    continue

                doc = Document(
                    doc_type=doc_type,
                    doc_name=pdf_file.stem,
                    filename=pdf_file.name,
                    absolute_filepath=str(pdf_file.absolute()),
                    page_count=page_count
                )
                documents.append(doc)

        logger.info(f"Loaded {len(documents)} valid documents")
        return documents

    def save_document_mapping(self, documents: List[Document], output_path: str):
        """Save document mapping to CSV."""
        os.makedirs(os.path.dirname(output_path), exist_ok=True)

        with open(output_path, 'w', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=['type', 'doc_name', 'filename', 'pages', 'validation_status'])
            writer.writeheader()
            for doc in documents:
                writer.writerow({
                    'type': doc.doc_type,
                    'doc_name': doc.doc_name,
                    'filename': doc.filename,
                    'pages': doc.page_count,
                    'validation_status': 'VALID'
                })

        logger.info(f"Saved document mapping to {output_path}")
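A minimal usage sketch for the PdfLoader above; the raw-data path and output CSV location are placeholders rather than values fixed by this commit:

# Hypothetical example: enumerate valid PDFs and write the mapping CSV.
from services.pdf_loader import PdfLoader

loader = PdfLoader(raw_data_path=".data/rvl-cdip-mp/raw_data", max_file_size_mb=150)

# Skip the 'language' category via the exclude_types hook.
documents = loader.get_all_documents(exclude_types=["language"])

# Persist the type/doc_name/filename/pages mapping consumed later by AssetLoader.
loader.save_document_mapping(documents, "data/metadata/document_mapping.csv")

print(f"{len(documents)} valid PDFs, {sum(d.page_count for d in documents)} pages total")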
src/assets/services/textract_ocr.py
ADDED
@@ -0,0 +1,51 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0

from typing import List
from loguru import logger
from textractor import Textractor
from textractor.data.constants import TextractFeatures


class TextractOcr:
    """Extracts text from PDFs using AWS Textract with S3 upload."""

    def __init__(self, s3_bucket: str, s3_prefix: str = 'textract-temp'):
        """Initialize Textract OCR.

        Args:
            s3_bucket: S3 bucket for temporary uploads
            s3_prefix: S3 prefix for uploads
        """
        self.textractor = Textractor()
        self.s3_upload_path = f"s3://{s3_bucket}/{s3_prefix}"
        logger.info(f"Textract S3 upload path: {self.s3_upload_path}")

    def extract_text_from_pdf(self, pdf_bytes: bytes) -> List[str]:
        """Extract text from PDF using Textract with S3 upload.

        Returns:
            List of markdown text, one per page.
        """
        # Use start_document_analysis with S3 upload
        lazy_document = self.textractor.start_document_analysis(
            file_source=pdf_bytes,
            s3_upload_path=self.s3_upload_path,
            features=[TextractFeatures.LAYOUT],
            save_image=False,
        )

        # Wait for completion
        _ = lazy_document.response
        pages = lazy_document.document.pages

        # Convert to markdown
        page_texts = []
        for page in pages:
            try:
                page_content = page.to_markdown()
            except Exception:
                page_content = page.get_text()
            page_texts.append(page_content)

        return page_texts if page_texts else ['']
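A minimal usage sketch for the TextractOcr wrapper above; the bucket name and file path are placeholders, and AWS credentials with Textract access are assumed:

# Hypothetical example: OCR a single PDF with the wrapper defined in this file.
from services.textract_ocr import TextractOcr

ocr = TextractOcr(s3_bucket="my-temp-bucket", s3_prefix="textract-temp")

with open("sample.pdf", "rb") as f:
    pdf_bytes = f.read()

page_texts = ocr.extract_text_from_pdf(pdf_bytes)  # one markdown string per page
for i, text in enumerate(page_texts, start=1):
    print(f"--- page {i} ---")
    print(text[:200])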
src/benchmarks/__init__.py
ADDED
@@ -0,0 +1,2 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0
src/benchmarks/models.py
ADDED
@@ -0,0 +1,61 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0

from typing import List, Dict, Optional
from pydantic import BaseModel, Field


class SourceDocument(BaseModel):
    """Represents a source document used in splicing."""

    doc_type: str = Field(..., description="Document category/type")
    doc_name: str = Field(..., description="Source document identifier")
    pages: List[int] = Field(..., description="Page numbers used from this document")


class GroundTruthPage(BaseModel):
    """Ground truth for a single page in spliced document."""

    page_num: int = Field(..., ge=1, description="Page number in spliced document")
    doc_type: str = Field(..., description="Document category/type")
    source_doc: str = Field(..., description="Source document identifier")
    source_page: int = Field(..., ge=1, description="Page number in source document")


class SplicedDocument(BaseModel):
    """Represents a spliced benchmark document."""

    spliced_doc_id: str = Field(..., description="Unique identifier for spliced document")
    source_documents: List[SourceDocument] = Field(..., description="Source documents used")
    ground_truth: List[GroundTruthPage] = Field(..., description="Ground truth page mappings")
    total_pages: int = Field(..., gt=0, description="Total pages in spliced document")


class BenchmarkSet(BaseModel):
    """Collection of spliced documents for a benchmark."""

    benchmark_name: str = Field(..., description="Benchmark identifier")
    strategy: str = Field(..., description="Shuffling strategy used")
    split: str = Field(..., description="Dataset split: train, test, or validation")
    created_at: str = Field(..., description="Creation timestamp")
    documents: List[SplicedDocument] = Field(..., description="Spliced documents")
    statistics: Dict[str, int] = Field(default_factory=dict, description="Benchmark statistics")


class DocumentAsset(BaseModel):
    """Represents a loaded document asset."""

    doc_type: str = Field(..., description="Document category/type")
    doc_name: str = Field(..., description="Document identifier")
    filename: str = Field(..., description="PDF filename")
    page_count: int = Field(..., gt=0, description="Number of pages")
    pages: List['PageAsset'] = Field(default_factory=list, description="Page assets")


class PageAsset(BaseModel):
    """Represents a single page asset."""

    page_num: int = Field(..., ge=1, description="Page number")
    image_path: str = Field(..., description="Path to page image")
    text_path: str = Field(..., description="Path to OCR text")
    text_content: Optional[str] = Field(None, description="Loaded text content")
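A small sketch of how these models fit together; the identifiers are made up for illustration:

# Hypothetical example: build a two-page spliced document by hand.
from models import SourceDocument, GroundTruthPage, SplicedDocument

src = SourceDocument(doc_type="letter", doc_name="letter_0001", pages=[1, 2])
gt = [
    GroundTruthPage(page_num=1, doc_type="letter", source_doc="letter_0001", source_page=1),
    GroundTruthPage(page_num=2, doc_type="letter", source_doc="letter_0001", source_page=2),
]
doc = SplicedDocument(
    spliced_doc_id="example-0001",
    source_documents=[src],
    ground_truth=gt,
    total_pages=2,
)
print(doc.model_dump_json(indent=2))  # pydantic v2; use .json() if the repo pins pydantic v1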
src/benchmarks/run.py
ADDED
@@ -0,0 +1,148 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0

"""
Create Benchmarks from Assets

Generates document splitting benchmarks from structured assets.

Usage:
    python run.py --strategy poly_seq
    python run.py --strategy mono_seq --num-docs-train 500
"""

import argparse
from pathlib import Path
from loguru import logger

from services.asset_loader import AssetLoader
from services.split_manager import SplitManager
from services.benchmark_generator import BenchmarkGenerator
from services.benchmark_writer import BenchmarkWriter
from services.shuffle_strategies import get_strategy, STRATEGIES


def main():
    parser = argparse.ArgumentParser(description='Create benchmarks from assets')

    # Paths
    parser.add_argument('--assets-path', default='data/assets',
                        help='Path to assets from create_assets')
    parser.add_argument('--output-path', default='data/benchmarks',
                        help='Output path for benchmarks')
    parser.add_argument('--split-mapping', default='data/metadata/split_mapping.json',
                        help='Path to split mapping JSON (created if not exists)')

    # Strategy
    parser.add_argument('--strategy', choices=list(STRATEGIES.keys()) + ['all'], default='all',
                        help='Shuffle strategy to use (default: all strategies)')

    # Split configuration
    parser.add_argument('--num-docs-train', type=int, default=800,
                        help='Number of spliced documents for training')
    parser.add_argument('--num-docs-test', type=int, default=500,
                        help='Number of spliced documents for testing')
    parser.add_argument('--num-docs-val', type=int, default=200,
                        help='Number of spliced documents for validation')

    # Strategy parameters
    parser.add_argument('--size', choices=['small', 'large'], default='small',
                        help='Benchmark size: small (5-20 pages) or large (20-500 pages)')
    parser.add_argument('--random-seed', type=int, default=42,
                        help='Random seed for reproducibility')

    args = parser.parse_args()

    # Set page ranges based on size
    if args.size == 'small':
        min_pages, max_pages = 5, 20
    else:  # large
        min_pages, max_pages = 20, 500

    # Determine which strategies to run
    if args.strategy == 'all':
        strategies_to_run = list(STRATEGIES.keys())
        logger.info(f"Creating benchmarks for all strategies: {strategies_to_run}")
    else:
        strategies_to_run = [args.strategy]
        logger.info(f"Creating benchmark with strategy: {args.strategy}")

    logger.info(f"Size: {args.size} ({min_pages}-{max_pages} pages)")

    # Load assets
    loader = AssetLoader(assets_path=args.assets_path)
    documents_by_type = loader.load_all_documents()

    if not documents_by_type:
        logger.error("No documents loaded. Check assets path.")
        return

    # Create or load split
    split_manager = SplitManager(random_seed=args.random_seed)

    if args.split_mapping and Path(args.split_mapping).exists():
        logger.info(f"Loading existing split from {args.split_mapping}")
        splits = split_manager.load_split(args.split_mapping)
    else:
        logger.info("Creating new split")
        splits = split_manager.create_split(documents_by_type)

        # Save split mapping to metadata folder
        split_path = Path(args.split_mapping)
        split_path.parent.mkdir(parents=True, exist_ok=True)
        split_manager.save_split(splits, str(split_path))

    # Run for each strategy
    for strategy_name in strategies_to_run:
        logger.info(f"\n{'='*60}")
        logger.info(f"Processing strategy: {strategy_name}")
        logger.info(f"{'='*60}\n")

        # Initialize strategy
        strategy = get_strategy(
            strategy_name,
            min_pages=min_pages,
            max_pages=max_pages,
            random_seed=args.random_seed
        )

        # Initialize generator and writer
        generator = BenchmarkGenerator(strategy=strategy)
        writer = BenchmarkWriter(
            output_base_path=str(Path(args.output_path) / strategy_name / args.size),
            assets_path=args.assets_path
        )

        # Generate benchmarks for each split
        split_configs = [
            ('train', args.num_docs_train),
            ('test', args.num_docs_test),
            ('validation', args.num_docs_val)
        ]

        for split_name, num_docs in split_configs:
            if num_docs <= 0:
                logger.info(f"Skipping {split_name} (num_docs=0)")
                continue

            logger.info(f"Generating {split_name} benchmark...")

            benchmark_set = generator.generate_for_split(
                documents_by_type=documents_by_type,
                doc_names_for_split=splits[split_name],
                num_spliced_docs=num_docs,
                split_name=split_name,
                benchmark_name=strategy_name
            )

            writer.save_benchmark_set(benchmark_set, split_name)

            logger.info(f"Completed {split_name}: {benchmark_set.statistics}")

    logger.info("\n" + "="*60)
    logger.info("All benchmark creation complete!")
    logger.info("="*60)


if __name__ == '__main__':
    main()
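A hedged sketch of the programmatic equivalent of one loop iteration in main(), generating a small poly_seq test split; paths and the document count are placeholders:

# Hypothetical example mirroring one strategy/split iteration of run.py.
from pathlib import Path

from services.asset_loader import AssetLoader
from services.split_manager import SplitManager
from services.benchmark_generator import BenchmarkGenerator
from services.benchmark_writer import BenchmarkWriter
from services.shuffle_strategies import get_strategy

documents_by_type = AssetLoader(assets_path="data/assets").load_all_documents()
splits = SplitManager(random_seed=42).create_split(documents_by_type)

strategy = get_strategy("poly_seq", min_pages=5, max_pages=20, random_seed=42)
generator = BenchmarkGenerator(strategy=strategy)
writer = BenchmarkWriter(
    output_base_path=str(Path("data/benchmarks") / "poly_seq" / "small"),
    assets_path="data/assets",
)

benchmark_set = generator.generate_for_split(
    documents_by_type=documents_by_type,
    doc_names_for_split=splits["test"],
    num_spliced_docs=10,
    split_name="test",
    benchmark_name="poly_seq",
)
writer.save_benchmark_set(benchmark_set, "test")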
src/benchmarks/services/__init__.py
ADDED
@@ -0,0 +1,2 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0
src/benchmarks/services/asset_loader.py
ADDED
@@ -0,0 +1,117 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0

import os
import csv
from pathlib import Path
from typing import List, Dict, Optional
from loguru import logger

from models import DocumentAsset, PageAsset


class AssetLoader:
    """Loads document assets from create_assets output using document_mapping.csv."""

    def __init__(self, assets_path: str, mapping_csv: str = None):
        """Initialize asset loader.

        Args:
            assets_path: Path to assets directory
            mapping_csv: Path to document_mapping.csv (default: data/metadata/document_mapping.csv)
        """
        self.assets_path = Path(assets_path)

        if mapping_csv is None:
            mapping_csv = "data/metadata/document_mapping.csv"

        self.mapping_csv = Path(mapping_csv)

        if not self.mapping_csv.exists():
            raise FileNotFoundError(f"Document mapping not found: {self.mapping_csv}")

    def load_all_documents(self, doc_types: List[str] = None) -> Dict[str, List[DocumentAsset]]:
        """Load all document assets grouped by type from CSV.

        Args:
            doc_types: Optional list of document types to load. If None, loads all.

        Returns:
            Dict mapping doc_type to list of DocumentAsset objects.
        """
        documents_by_type = {}

        # Read document mapping CSV
        with open(self.mapping_csv, 'r', encoding='utf-8') as f:
            reader = csv.DictReader(f)
            for row in reader:
                doc_type = row['type']
                doc_name = row['doc_name']
                filename = row['filename']
                page_count = int(row['pages'])

                # Filter by doc_types if specified
                if doc_types and doc_type not in doc_types:
                    continue

                # Build document asset
                doc_asset = self._create_document_asset(
                    doc_type=doc_type,
                    doc_name=doc_name,
                    filename=filename,
                    page_count=page_count
                )

                if doc_type not in documents_by_type:
                    documents_by_type[doc_type] = []

                documents_by_type[doc_type].append(doc_asset)

        total = sum(len(docs) for docs in documents_by_type.values())
        logger.info(f"Loaded {total} documents across {len(documents_by_type)} types from CSV")

        return documents_by_type

    def _create_document_asset(
        self,
        doc_type: str,
        doc_name: str,
        filename: str,
        page_count: int
    ) -> DocumentAsset:
        """Create DocumentAsset with page information.

        Args:
            doc_type: Document category
            doc_name: Document identifier
            filename: PDF filename
            page_count: Number of pages

        Returns:
            DocumentAsset with populated pages
        """
        pages = []

        for page_num in range(1, page_count + 1):
            page_num_str = f"{page_num:04d}"

            # Construct paths based on known structure
            page_dir = self.assets_path / doc_type / filename / "pages" / page_num_str
            image_path = page_dir / f"page-{page_num_str}.png"
            text_path = page_dir / f"page-{page_num_str}-textract.md"

            page_asset = PageAsset(
                page_num=page_num,
                image_path=str(image_path),
                text_path=str(text_path),
                text_content=None  # Loaded on demand
            )
            pages.append(page_asset)

        return DocumentAsset(
            doc_type=doc_type,
            doc_name=doc_name,
            filename=filename,
            page_count=page_count,
            pages=pages
        )
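A minimal sketch of loading assets and inspecting the per-page paths the loader derives; "email" is a placeholder document type, and the mapping CSV is assumed to exist from the asset step:

# Hypothetical example of AssetLoader usage and the derived page paths.
from services.asset_loader import AssetLoader

loader = AssetLoader(assets_path="data/assets",
                     mapping_csv="data/metadata/document_mapping.csv")
docs_by_type = loader.load_all_documents(doc_types=["email"])  # placeholder type

first = docs_by_type["email"][0]
print(first.doc_name, first.page_count)
print(first.pages[0].image_path)  # data/assets/email/<filename>/pages/0001/page-0001.png
print(first.pages[0].text_path)   # data/assets/email/<filename>/pages/0001/page-0001-textract.md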
src/benchmarks/services/benchmark_generator.py
ADDED
@@ -0,0 +1,77 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0

from typing import Dict, List
from datetime import datetime
from loguru import logger

from models import DocumentAsset, BenchmarkSet
from services.shuffle_strategies.base_strategy import BaseStrategy


class BenchmarkGenerator:
    """Orchestrates benchmark generation."""

    def __init__(self, strategy: BaseStrategy):
        self.strategy = strategy

    def generate_for_split(
        self,
        documents_by_type: Dict[str, List[DocumentAsset]],
        doc_names_for_split: Dict[str, List[str]],
        num_spliced_docs: int,
        split_name: str,
        benchmark_name: str
    ) -> BenchmarkSet:
        """Generate benchmark set for a specific split.

        Args:
            documents_by_type: All available documents grouped by type
            doc_names_for_split: Document names to use for this split
            num_spliced_docs: Number of spliced documents to generate
            split_name: Name of the split (train, test, validation)
            benchmark_name: Name of the benchmark

        Returns:
            BenchmarkSet object
        """
        logger.info(f"Generating {num_spliced_docs} documents for {split_name} split")

        spliced_documents = self.strategy.generate(
            documents_by_type=documents_by_type,
            doc_names_for_split=doc_names_for_split,
            num_spliced_docs=num_spliced_docs
        )

        # Calculate statistics
        statistics = self._calculate_statistics(spliced_documents)

        benchmark_set = BenchmarkSet(
            benchmark_name=benchmark_name,
            strategy=self.strategy.__class__.__name__,
            split=split_name,
            created_at=datetime.now().isoformat(),
            documents=spliced_documents,
            statistics=statistics
        )

        logger.info(f"Generated benchmark set with {len(spliced_documents)} documents")
        return benchmark_set

    def _calculate_statistics(self, spliced_documents: List) -> Dict[str, int]:
        """Calculate statistics for the benchmark set."""
        total_pages = sum(doc.total_pages for doc in spliced_documents)
        total_source_docs = sum(len(doc.source_documents) for doc in spliced_documents)

        doc_types = set()
        for doc in spliced_documents:
            for source in doc.source_documents:
                doc_types.add(source.doc_type)

        return {
            'total_spliced_documents': len(spliced_documents),
            'total_pages': total_pages,
            'total_source_documents': total_source_docs,
            'unique_doc_types': len(doc_types),
            'avg_pages_per_document': int(total_pages / len(spliced_documents)) if spliced_documents else 0
        }
src/benchmarks/services/benchmark_writer.py
ADDED
@@ -0,0 +1,177 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0

import csv
import json
from pathlib import Path
from loguru import logger

from models import BenchmarkSet


class BenchmarkWriter:
    """Writes benchmark datasets to disk as CSV files."""

    def __init__(self, output_base_path: str, assets_path: str = "data/assets"):
        self.output_base_path = Path(output_base_path)
        self.assets_path = Path(assets_path)

    def save_benchmark_set(
        self,
        benchmark_set: BenchmarkSet,
        split: str
    ):
        """Save a benchmark set to CSV and ground truth JSON files.

        Args:
            benchmark_set: The benchmark set to save
            split: Split name (train, test, or validation)
        """
        self.output_base_path.mkdir(parents=True, exist_ok=True)

        output_path = self.output_base_path / f"{split}.csv"
        json_dir = self.output_base_path / "ground_truth_json" / split
        json_dir.mkdir(parents=True, exist_ok=True)

        # Generate group IDs (AA, AB, AC, etc.)
        def generate_group_id(index):
            """Generate group_id like AA, AB, AC, ..., AZ, BA, BB, etc."""
            alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
            first_char = alphabet[index // 26 % 26]
            second_char = alphabet[index % 26]
            return first_char + second_char

        # Flatten ground truth pages into CSV rows
        rows = []

        # Track unique source documents across all packets for group_id assignment
        source_doc_to_group_id = {}
        group_id_counter = 0

        for doc_idx, doc in enumerate(benchmark_set.documents):
            # Track page ordinal per source document
            source_doc_ordinals = {}
            # Track local_doc_id per source document within this packet
            source_doc_to_local_id = {}
            local_id_counter = {}

            for gt_page in doc.ground_truth:
                source_key = gt_page.source_doc

                # Assign group_id per unique source document
                if source_key not in source_doc_to_group_id:
                    source_doc_to_group_id[source_key] = generate_group_id(group_id_counter)
                    group_id_counter += 1

                group_id = source_doc_to_group_id[source_key]

                # Increment ordinal for this source document
                if source_key not in source_doc_ordinals:
                    source_doc_ordinals[source_key] = 0
                source_doc_ordinals[source_key] += 1

                # Assign local_doc_id per source document
                if source_key not in source_doc_to_local_id:
                    doc_type_prefix = gt_page.doc_type.replace(" ", "-").lower()
                    if doc_type_prefix not in local_id_counter:
                        local_id_counter[doc_type_prefix] = 0
                    local_id_counter[doc_type_prefix] += 1
                    source_doc_to_local_id[source_key] = f"{doc_type_prefix}-{local_id_counter[doc_type_prefix]:02d}"

                local_doc_id = source_doc_to_local_id[source_key]

                # Build paths
                image_path = f"{self.assets_path}/{gt_page.doc_type}/{gt_page.source_doc}.pdf/pages/{gt_page.source_page:04d}/page-{gt_page.source_page:04d}.png"
                text_path = f"{self.assets_path}/{gt_page.doc_type}/{gt_page.source_doc}.pdf/pages/{gt_page.source_page:04d}/page-{gt_page.source_page:04d}-textract.md"

                rows.append({
                    'doc_type': gt_page.doc_type,
                    'original_doc_name': gt_page.source_doc,
                    'parent_doc_name': doc.spliced_doc_id,
                    'local_doc_id': local_doc_id,
                    'page': gt_page.page_num,
                    'image_path': image_path,
                    'text_path': text_path,
                    'group_id': group_id,
                    'local_doc_id_page_ordinal': source_doc_ordinals[source_key]
                })

            # Generate ground truth JSON for this parent document
            self._save_ground_truth_json(doc, json_dir)

        # Write CSV
        if rows:
            fieldnames = ['doc_type', 'original_doc_name', 'parent_doc_name', 'local_doc_id',
                          'page', 'image_path', 'text_path', 'group_id', 'local_doc_id_page_ordinal']
            with open(output_path, 'w', newline='', encoding='utf-8') as f:
                writer = csv.DictWriter(f, fieldnames=fieldnames)
                writer.writeheader()
                writer.writerows(rows)

        logger.info(f"Saved {len(benchmark_set.documents)} spliced documents ({len(rows)} pages) to {output_path}")
        logger.info(f"Saved {len(benchmark_set.documents)} ground truth JSON files to {json_dir}")

    def _save_ground_truth_json(self, doc, json_dir: Path):
        """Generate ground truth JSON for a single parent document."""
        # Group pages by source document (doc_type, source_doc)
        subdocs = {}
        for gt_page in doc.ground_truth:
            key = (gt_page.doc_type, gt_page.source_doc)
            if key not in subdocs:
                subdocs[key] = []
            subdocs[key].append(gt_page)

        # Build subdocuments structure
        subdocuments = []

        for subdoc_idx, ((doc_type, source_doc), pages) in enumerate(subdocs.items()):
            group_id = self._generate_group_id(subdoc_idx)

            # Count occurrences of this doc_type so far
            doc_type_count = sum(1 for s in subdocuments if s['doc_type_id'] == doc_type) + 1
            local_doc_id = f"{doc_type}-{doc_type_count:02d}"

            page_ordinals = [p.page_num for p in pages]

            subdoc_pages = []
            for idx, gt_page in enumerate(pages, 1):
                image_path = f"{self.assets_path}/{gt_page.doc_type}/{gt_page.source_doc}.pdf/pages/{gt_page.source_page:04d}/page-{gt_page.source_page:04d}.png"
                text_path = f"{self.assets_path}/{gt_page.doc_type}/{gt_page.source_doc}.pdf/pages/{gt_page.source_page:04d}/page-{gt_page.source_page:04d}-textract.md"

                subdoc_pages.append({
                    "page": gt_page.page_num,
                    "original_doc_name": gt_page.source_doc,
                    "image_path": image_path,
                    "text_path": text_path,
                    "local_doc_id_page_ordinal": idx
                })

            subdocuments.append({
                "doc_type_id": doc_type,
                "page_ordinals": page_ordinals,
                "local_doc_id": local_doc_id,
                "group_id": group_id,
                "pages": subdoc_pages
            })

        # Build final JSON structure
        ground_truth = {
            "doc_id": doc.spliced_doc_id,
            "total_pages": doc.total_pages,
            "subdocuments": subdocuments
        }

        # Write JSON file
        json_path = json_dir / f"{doc.spliced_doc_id}.json"
        with open(json_path, 'w', encoding='utf-8') as f:
            json.dump(ground_truth, f, indent=2)

    def _generate_group_id(self, index):
        """Generate group_id like AA, AB, AC, ..., AZ, BA, BB, etc."""
        result = ""
        index += 1
        while index > 0:
            index -= 1
            result = chr(65 + (index % 26)) + result
            index //= 26
        return result or "A"
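A quick, hedged sanity check of the two ID helpers above: the nested generate_group_id used for the CSV always yields two letters (AA, AB, ...), while _generate_group_id used for the ground-truth JSON is bijective base-26 (A, B, ..., Z, AA, ...). The output base path below is a placeholder:

# Hypothetical check of the group-id schemes; csv_group_id mirrors the nested helper.
from services.benchmark_writer import BenchmarkWriter

writer = BenchmarkWriter(output_base_path="/tmp/bench", assets_path="data/assets")
print([writer._generate_group_id(i) for i in (0, 1, 25, 26, 27)])
# ['A', 'B', 'Z', 'AA', 'AB']

def csv_group_id(index, alphabet="ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
    # mirrors the nested generate_group_id inside save_benchmark_set
    return alphabet[index // 26 % 26] + alphabet[index % 26]

print([csv_group_id(i) for i in (0, 1, 25, 26, 27)])
# ['AA', 'AB', 'AZ', 'BA', 'BB']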
src/benchmarks/services/shuffle_strategies/__init__.py
ADDED
@@ -0,0 +1,25 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0

from .base_strategy import BaseStrategy
from .mono_seq import MonoSeq
from .mono_rand import MonoRand
from .poly_seq import PolySeq
from .poly_int import PolyInt
from .poly_rand import PolyRand


STRATEGIES = {
    'mono_seq': MonoSeq,
    'mono_rand': MonoRand,
    'poly_seq': PolySeq,
    'poly_int': PolyInt,
    'poly_rand': PolyRand,
}


def get_strategy(name: str, **kwargs) -> BaseStrategy:
    """Get a strategy instance by name."""
    if name not in STRATEGIES:
        raise ValueError(f"Unknown strategy: {name}. Available: {list(STRATEGIES.keys())}")
    return STRATEGIES[name](**kwargs)
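A minimal sketch of the registry and factory defined above; the constructor arguments are illustrative values:

# Hypothetical quick check of the strategy registry.
from services.shuffle_strategies import STRATEGIES, get_strategy

print(list(STRATEGIES.keys()))  # ['mono_seq', 'mono_rand', 'poly_seq', 'poly_int', 'poly_rand']

strategy = get_strategy("mono_rand", min_pages=5, max_pages=20, random_seed=7)
print(type(strategy).__name__)  # MonoRand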
src/benchmarks/services/shuffle_strategies/base_strategy.py
ADDED
@@ -0,0 +1,90 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0

from abc import ABC, abstractmethod
from typing import List, Dict
import random

from models import DocumentAsset, SplicedDocument


class BaseStrategy(ABC):
    """Abstract base class for shuffle strategies."""

    def __init__(
        self,
        min_pages: int = 17,
        max_pages: int = 20,
        random_seed: int = 42
    ):
        self.min_pages = min_pages
        self.max_pages = max_pages
        self.random_seed = random_seed
        self.rng = random.Random(random_seed)  # nosec B311 - non-cryptographic use for benchmark shuffling

    @abstractmethod
    def generate(
        self,
        documents_by_type: Dict[str, List[DocumentAsset]],
        doc_names_for_split: Dict[str, List[str]],
        num_spliced_docs: int
    ) -> List[SplicedDocument]:
        """Generate spliced documents using this strategy.

        Args:
            documents_by_type: All available documents grouped by type
            doc_names_for_split: Document names to use for this split
            num_spliced_docs: Number of spliced documents to generate

        Returns:
            List of SplicedDocument objects
        """
        pass

    def _get_available_docs(
        self,
        documents_by_type: Dict[str, List[DocumentAsset]],
        doc_names_for_split: Dict[str, List[str]]
    ) -> Dict[str, List[DocumentAsset]]:
        """Filter documents to only those in the split."""
        available = {}

        for doc_type, doc_names in doc_names_for_split.items():
            if doc_type not in documents_by_type:
                continue

            doc_name_set = set(doc_names)
            available[doc_type] = [
                doc for doc in documents_by_type[doc_type]
                if doc.doc_name in doc_name_set
            ]

        return available

    def _get_random_doc(
        self,
        available_docs: Dict[str, List[DocumentAsset]],
        doc_type: str = None
    ) -> DocumentAsset:
        """Get a random document, optionally from a specific type."""
        if doc_type:
            if doc_type not in available_docs or not available_docs[doc_type]:
                raise ValueError(f"No documents available for type: {doc_type}")
            return self.rng.choice(available_docs[doc_type])
        else:
            all_docs = [doc for docs in available_docs.values() for doc in docs]
            if not all_docs:
                raise ValueError("No documents available")
            return self.rng.choice(all_docs)

    def _get_random_pages(
        self,
        doc: DocumentAsset,
        num_pages: int = None
    ) -> List[int]:
        """Get random page numbers from a document."""
        if num_pages is None:
            num_pages = self.rng.randint(self.min_pages, self.max_pages)

        num_pages = min(num_pages, doc.page_count)
        return self.rng.sample(range(1, doc.page_count + 1), num_pages)
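BaseStrategy can also be extended beyond the five shipped strategies. A minimal, purely hypothetical subclass (not part of this commit) that builds each packet from one contiguous slice of a single document might look like:

# Hypothetical strategy sketch: one contiguous page slice per packet.
import uuid
from typing import Dict, List

from models import DocumentAsset, GroundTruthPage, SourceDocument, SplicedDocument
from services.shuffle_strategies.base_strategy import BaseStrategy


class SingleSliceStrategy(BaseStrategy):
    """Takes a contiguous run of pages from one randomly chosen document."""

    def generate(self, documents_by_type: Dict[str, List[DocumentAsset]],
                 doc_names_for_split: Dict[str, List[str]],
                 num_spliced_docs: int) -> List[SplicedDocument]:
        available = self._get_available_docs(documents_by_type, doc_names_for_split)
        out = []
        for _ in range(num_spliced_docs):
            doc = self._get_random_doc(available)
            length = min(doc.page_count, self.rng.randint(self.min_pages, self.max_pages))
            start = self.rng.randint(1, doc.page_count - length + 1)
            pages = list(range(start, start + length))
            gt = [GroundTruthPage(page_num=i, doc_type=doc.doc_type,
                                  source_doc=doc.doc_name, source_page=p)
                  for i, p in enumerate(pages, start=1)]
            out.append(SplicedDocument(
                spliced_doc_id=str(uuid.uuid4()),
                source_documents=[SourceDocument(doc_type=doc.doc_type,
                                                 doc_name=doc.doc_name, pages=pages)],
                ground_truth=gt,
                total_pages=len(pages),
            ))
        return out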
src/benchmarks/services/shuffle_strategies/mono_rand.py
ADDED
@@ -0,0 +1,89 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0

from typing import List, Dict
import uuid
from loguru import logger

from models import DocumentAsset, SplicedDocument, SourceDocument, GroundTruthPage
from services.shuffle_strategies.base_strategy import BaseStrategy


class MonoRand(BaseStrategy):
    """DocSplit-Mono-Rand: Single category document pages randomization.

    Similar to Mono-Seq but shuffles pages from concatenated documents.
    Tests models' robustness to page-level disruptions common in manual
    document assembly, requiring boundary detection and page sequence reconstruction.
    """

    def generate(
        self,
        documents_by_type: Dict[str, List[DocumentAsset]],
        doc_names_for_split: Dict[str, List[str]],
        num_spliced_docs: int
    ) -> List[SplicedDocument]:

        available_docs = self._get_available_docs(documents_by_type, doc_names_for_split)

        # Filter out language category for large datasets (min_pages >= 20)
        if self.min_pages >= 20 and "language" in available_docs:
            del available_docs["language"]
        spliced_documents = []

        for i in range(num_spliced_docs):
            # Pick a random document type
            doc_type = self.rng.choice(list(available_docs.keys()))

            # Set target page count
            target_pages = self.rng.randint(self.min_pages, self.max_pages)

            # Keep adding entire documents until reaching target
            source_documents = []
            ground_truth = []
            current_page = 1
            used_docs = set()

            while current_page - 1 < target_pages:
                available = [d for d in available_docs[doc_type]
                             if d.doc_name not in used_docs and d.page_count <= self.max_pages]

                if not available:
                    break

                doc = self.rng.choice(available)
                used_docs.add(doc.doc_name)

                if current_page - 1 + doc.page_count > self.max_pages:
                    continue

                # Add all pages but shuffle their order
                pages = list(range(1, doc.page_count + 1))
                self.rng.shuffle(pages)

                source_documents.append(SourceDocument(
                    doc_type=doc.doc_type,
                    doc_name=doc.doc_name,
                    pages=pages
                ))

                for source_page in pages:
                    ground_truth.append(GroundTruthPage(
                        page_num=current_page,
                        doc_type=doc.doc_type,
                        source_doc=doc.doc_name,
                        source_page=source_page
                    ))
                    current_page += 1

            if current_page - 1 >= self.min_pages:
                spliced_doc = SplicedDocument(
                    spliced_doc_id=str(uuid.uuid4()),
                    source_documents=source_documents,
                    ground_truth=ground_truth,
                    total_pages=current_page - 1
                )
                spliced_documents.append(spliced_doc)

        logger.info(f"Generated {len(spliced_documents)} DocSplit-Mono-Rand documents")
        return spliced_documents
src/benchmarks/services/shuffle_strategies/mono_seq.py
ADDED
@@ -0,0 +1,93 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0

from typing import List, Dict
import uuid
from loguru import logger

from models import DocumentAsset, SplicedDocument, SourceDocument, GroundTruthPage
from services.shuffle_strategies.base_strategy import BaseStrategy


class MonoSeq(BaseStrategy):
    """DocSplit-Mono-Seq: Single category document concatenation sequentially.

    Creates document packets by concatenating entire documents from the same category
    while preserving original page order. Tests boundary detection without
    category transitions as discriminative signals.
    """

    def generate(
        self,
        documents_by_type: Dict[str, List[DocumentAsset]],
        doc_names_for_split: Dict[str, List[str]],
        num_spliced_docs: int
    ) -> List[SplicedDocument]:

        available_docs = self._get_available_docs(documents_by_type, doc_names_for_split)

        # Filter out language category for large datasets (min_pages >= 20)
        if self.min_pages >= 20 and "language" in available_docs:
            del available_docs["language"]
        spliced_documents = []

        for i in range(num_spliced_docs):
            # Pick a random document type
            doc_type = self.rng.choice(list(available_docs.keys()))

            # Set target page count
            target_pages = self.rng.randint(self.min_pages, self.max_pages)

            # Keep adding entire documents until reaching target
            source_documents = []
            ground_truth = []
            current_page = 1
            used_docs = set()

            while current_page - 1 < target_pages:
                # Get available docs of this type that haven't been used
                available = [d for d in available_docs[doc_type]
                             if d.doc_name not in used_docs and d.page_count <= self.max_pages]

                if not available:
                    break

                # Pick a random document
                doc = self.rng.choice(available)
                used_docs.add(doc.doc_name)

                # Check if adding this doc would exceed max_pages
                if current_page - 1 + doc.page_count > self.max_pages:
                    continue

                # Add all pages from this document in original order
                pages = list(range(1, doc.page_count + 1))

                source_documents.append(SourceDocument(
                    doc_type=doc.doc_type,
                    doc_name=doc.doc_name,
                    pages=pages
                ))

                # Add ground truth for each page
                for source_page in pages:
                    ground_truth.append(GroundTruthPage(
                        page_num=current_page,
                        doc_type=doc.doc_type,
                        source_doc=doc.doc_name,
                        source_page=source_page
                    ))
                    current_page += 1

            # Only add if we have at least min_pages
            if current_page - 1 >= self.min_pages:
                spliced_doc = SplicedDocument(
                    spliced_doc_id=str(uuid.uuid4()),
                    source_documents=source_documents,
                    ground_truth=ground_truth,
                    total_pages=current_page - 1
                )
                spliced_documents.append(spliced_doc)

        logger.info(f"Generated {len(spliced_documents)} DocSplit-Mono-Seq documents")
        return spliced_documents
src/benchmarks/services/shuffle_strategies/poly_int.py
ADDED
@@ -0,0 +1,111 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0

from typing import List, Dict
import uuid
from loguru import logger

from models import DocumentAsset, SplicedDocument, SourceDocument, GroundTruthPage
from services.shuffle_strategies.base_strategy import BaseStrategy


class PolyInt(BaseStrategy):
    """DocSplit-Poly-Int: Multi category document pages interleaving.

    Similar to Poly-Seq but interleaves pages in a round-robin fashion.
    Simulates batch processing scenarios such as mortgage processing where
    property deeds, tax records, and legal notices are interspersed.
    """

    def generate(
        self,
        documents_by_type: Dict[str, List[DocumentAsset]],
        doc_names_for_split: Dict[str, List[str]],
        num_spliced_docs: int
    ) -> List[SplicedDocument]:

        available_docs = self._get_available_docs(documents_by_type, doc_names_for_split)
        doc_types = list(available_docs.keys())

        if len(doc_types) < 2:
            raise ValueError("Need at least 2 document types for multi-category strategy")

        spliced_documents = []

        for i in range(num_spliced_docs):
            # Set target page count
            target_pages = self.rng.randint(self.min_pages, self.max_pages)

            # Collect entire documents from different types
            selected_docs = []
            used_types = set()
            total_pages = 0

            while total_pages < target_pages:
                available_types = [t for t in doc_types if t not in used_types]
                if not available_types:
                    break

                doc_type = self.rng.choice(available_types)
                used_types.add(doc_type)

                available = [d for d in available_docs[doc_type] if d.page_count <= self.max_pages]
                if not available:
                    continue

                doc = self.rng.choice(available)

                if total_pages + doc.page_count > self.max_pages:
                    continue

                selected_docs.append(doc)
                total_pages += doc.page_count

            # Collect all pages from selected documents
            all_pages = []
            for doc in selected_docs:
                for page_num in range(1, doc.page_count + 1):
                    all_pages.append({
                        'doc_type': doc.doc_type,
                        'doc_name': doc.doc_name,
                        'source_page': page_num
                    })

            # Interleave pages (shuffle them)
            self.rng.shuffle(all_pages)

            # Build source documents and ground truth
            source_docs_dict = {}
            ground_truth = []

            for idx, page_info in enumerate(all_pages, start=1):
                doc_key = (page_info['doc_type'], page_info['doc_name'])

                if doc_key not in source_docs_dict:
                    source_docs_dict[doc_key] = []

                source_docs_dict[doc_key].append(page_info['source_page'])

                ground_truth.append(GroundTruthPage(
                    page_num=idx,
                    doc_type=page_info['doc_type'],
                    source_doc=page_info['doc_name'],
                    source_page=page_info['source_page']
                ))

            source_documents = [
                SourceDocument(doc_type=doc_type, doc_name=doc_name, pages=pages)
                for (doc_type, doc_name), pages in source_docs_dict.items()
            ]

            if len(all_pages) >= self.min_pages:
                spliced_doc = SplicedDocument(
                    spliced_doc_id=str(uuid.uuid4()),
                    source_documents=source_documents,
                    ground_truth=ground_truth,
                    total_pages=len(all_pages)
                )
                spliced_documents.append(spliced_doc)

        logger.info(f"Generated {len(spliced_documents)} DocSplit-Poly-Int documents")
        return spliced_documents
src/benchmarks/services/shuffle_strategies/poly_rand.py
ADDED
@@ -0,0 +1,111 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0

from typing import List, Dict
import uuid
from loguru import logger

from models import DocumentAsset, SplicedDocument, SourceDocument, GroundTruthPage
from services.shuffle_strategies.base_strategy import BaseStrategy


class PolyRand(BaseStrategy):
    """DocSplit-Poly-Rand: Multi category document pages randomization.

    Similar to Poly-Seq but applies complete randomization across all pages,
    representing maximum entropy scenarios. Stress-tests model robustness under
    worst-case conditions where no structural assumptions hold.
    """

    def generate(
        self,
        documents_by_type: Dict[str, List[DocumentAsset]],
        doc_names_for_split: Dict[str, List[str]],
        num_spliced_docs: int
    ) -> List[SplicedDocument]:

        available_docs = self._get_available_docs(documents_by_type, doc_names_for_split)
        doc_types = list(available_docs.keys())

        if len(doc_types) < 2:
            raise ValueError("Need at least 2 document types for multi-category strategy")

        spliced_documents = []

        for i in range(num_spliced_docs):
            # Set target page count
            target_pages = self.rng.randint(self.min_pages, self.max_pages)

            # Collect entire documents from different types
            selected_docs = []
            used_types = set()
            total_pages = 0

            while total_pages < target_pages:
                available_types = [t for t in doc_types if t not in used_types]
                if not available_types:
                    break

                doc_type = self.rng.choice(available_types)
                used_types.add(doc_type)

                available = [d for d in available_docs[doc_type] if d.page_count <= self.max_pages]
                if not available:
                    continue

                doc = self.rng.choice(available)

                if total_pages + doc.page_count > self.max_pages:
                    continue

                selected_docs.append(doc)
                total_pages += doc.page_count

            # Collect all pages from selected documents
            all_pages = []
            for doc in selected_docs:
                for page_num in range(1, doc.page_count + 1):
                    all_pages.append({
                        'doc_type': doc.doc_type,
                        'doc_name': doc.doc_name,
                        'source_page': page_num
                    })

            # Fully shuffle all pages
            self.rng.shuffle(all_pages)

            # Build source documents and ground truth
            source_docs_dict = {}
            ground_truth = []

            for idx, page_info in enumerate(all_pages, start=1):
                doc_key = (page_info['doc_type'], page_info['doc_name'])

                if doc_key not in source_docs_dict:
                    source_docs_dict[doc_key] = []

                source_docs_dict[doc_key].append(page_info['source_page'])

                ground_truth.append(GroundTruthPage(
                    page_num=idx,
                    doc_type=page_info['doc_type'],
                    source_doc=page_info['doc_name'],
                    source_page=page_info['source_page']
                ))

            source_documents = [
                SourceDocument(doc_type=doc_type, doc_name=doc_name, pages=pages)
                for (doc_type, doc_name), pages in source_docs_dict.items()
            ]

            if len(all_pages) >= self.min_pages:
                spliced_doc = SplicedDocument(
                    spliced_doc_id=str(uuid.uuid4()),
                    source_documents=source_documents,
                    ground_truth=ground_truth,
                    total_pages=len(all_pages)
                )
                spliced_documents.append(spliced_doc)

        logger.info(f"Generated {len(spliced_documents)} DocSplit-Poly-Rand documents")
        return spliced_documents
src/benchmarks/services/shuffle_strategies/poly_seq.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0

from typing import List, Dict
import uuid
from loguru import logger

from models import DocumentAsset, SplicedDocument, SourceDocument, GroundTruthPage
from services.shuffle_strategies.base_strategy import BaseStrategy


class PolySeq(BaseStrategy):
    """DocSplit-Poly-Seq: sequential concatenation of multi-category documents.

    Creates document packets by concatenating documents from different categories
    without repetition, while preserving page ordering. Simulates heterogeneous
    document assembly scenarios such as medical claims processing.
    """

    def generate(
        self,
        documents_by_type: Dict[str, List[DocumentAsset]],
        doc_names_for_split: Dict[str, List[str]],
        num_spliced_docs: int
    ) -> List[SplicedDocument]:

        available_docs = self._get_available_docs(documents_by_type, doc_names_for_split)
        doc_types = list(available_docs.keys())

        if len(doc_types) < 2:
            raise ValueError("Need at least 2 document types for multi-category strategy")

        spliced_documents = []

        for i in range(num_spliced_docs):
            # Set target page count
            target_pages = self.rng.randint(self.min_pages, self.max_pages)

            # Keep adding entire documents from different types until reaching target
            source_documents = []
            ground_truth = []
            current_page = 1
            used_types = set()

            while current_page - 1 < target_pages:
                # Get available types not yet used
                available_types = [t for t in doc_types if t not in used_types]

                if not available_types:
                    break

                # Pick a random type
                doc_type = self.rng.choice(available_types)
                used_types.add(doc_type)

                # Pick a random document from this type
                available = [d for d in available_docs[doc_type] if d.page_count <= self.max_pages]
                if not available:
                    continue

                doc = self.rng.choice(available)

                if current_page - 1 + doc.page_count > self.max_pages:
                    continue

                # Add all pages in original order
                pages = list(range(1, doc.page_count + 1))

                source_documents.append(SourceDocument(
                    doc_type=doc.doc_type,
                    doc_name=doc.doc_name,
                    pages=pages
                ))

                for source_page in pages:
                    ground_truth.append(GroundTruthPage(
                        page_num=current_page,
                        doc_type=doc.doc_type,
                        source_doc=doc.doc_name,
                        source_page=source_page
                    ))
                    current_page += 1

            if current_page - 1 >= self.min_pages:
                spliced_doc = SplicedDocument(
                    spliced_doc_id=str(uuid.uuid4()),
                    source_documents=source_documents,
                    ground_truth=ground_truth,
                    total_pages=current_page - 1
                )
                spliced_documents.append(spliced_doc)

        logger.info(f"Generated {len(spliced_documents)} DocSplit-Poly-Seq documents")
        return spliced_documents
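For orientation (not part of the commit): a hedged sketch of how a strategy such as PolySeq might be driven from the benchmark generator. The PolySeq constructor arguments are assumptions, since BaseStrategy is defined in base_strategy.py and not shown here; `documents_by_type` and the split mapping are placeholders for the structures produced by the asset loader and SplitManager.

# Hypothetical driver for the Poly-Seq strategy. The PolySeq(...) arguments
# are assumed; only self.rng, self.min_pages, and self.max_pages are known
# from the code above (they come from BaseStrategy).
from services.shuffle_strategies.poly_seq import PolySeq

strategy = PolySeq(min_pages=5, max_pages=25, random_seed=42)  # assumed signature

spliced = strategy.generate(
    documents_by_type=documents_by_type,    # {doc_type: [DocumentAsset, ...]} (placeholder)
    doc_names_for_split=splits["train"],    # {doc_type: [doc_name, ...]} from SplitManager (placeholder)
    num_spliced_docs=100,
)

for doc in spliced:
    # Each packet concatenates whole documents and keeps pages in their original order.
    print(doc.spliced_doc_id, doc.total_pages, [sd.doc_type for sd in doc.source_documents])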
src/benchmarks/services/split_manager.py
ADDED
@@ -0,0 +1,110 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0

import json
import random
from pathlib import Path
from typing import Dict, List
from datetime import datetime
from loguru import logger

from models import DocumentAsset


class SplitManager:
    """Manages train/test/validation splits for documents."""

    def __init__(
        self,
        train_ratio: float = 0.6,
        test_ratio: float = 0.25,
        validation_ratio: float = 0.15,
        random_seed: int = 42
    ):
        self.train_ratio = train_ratio
        self.test_ratio = test_ratio
        self.validation_ratio = validation_ratio
        self.random_seed = random_seed

        if abs(train_ratio + test_ratio + validation_ratio - 1.0) > 0.001:
            raise ValueError("Split ratios must sum to 1.0")

    def create_split(
        self,
        documents_by_type: Dict[str, List[DocumentAsset]]
    ) -> Dict[str, Dict[str, List[str]]]:
        """Create stratified train/test/validation split.

        Returns:
            Dict with structure: {split_name: {doc_type: [doc_names]}}
        """
        random.seed(self.random_seed)

        splits = {
            'train': {},
            'test': {},
            'validation': {}
        }

        for doc_type, documents in documents_by_type.items():
            doc_names = [doc.doc_name for doc in documents]
            random.shuffle(doc_names)

            total = len(doc_names)
            train_end = int(total * self.train_ratio)
            test_end = train_end + int(total * self.test_ratio)

            splits['train'][doc_type] = doc_names[:train_end]
            splits['test'][doc_type] = doc_names[train_end:test_end]
            splits['validation'][doc_type] = doc_names[test_end:]

        logger.info(f"Created split: train={self._count_docs(splits['train'])}, "
                    f"test={self._count_docs(splits['test'])}, "
                    f"val={self._count_docs(splits['validation'])}")

        return splits

    def save_split(self, splits: Dict, output_path: str):
        """Save split mapping to JSON."""
        Path(output_path).parent.mkdir(parents=True, exist_ok=True)

        split_data = {
            'created_at': datetime.now().isoformat(),
            'split_config': {
                'train_ratio': self.train_ratio,
                'test_ratio': self.test_ratio,
                'validation_ratio': self.validation_ratio,
                'random_seed': self.random_seed,
                'stratified': True
            },
            'splits': splits,
            'statistics': {
                'train': self._get_statistics(splits['train']),
                'test': self._get_statistics(splits['test']),
                'validation': self._get_statistics(splits['validation'])
            }
        }

        with open(output_path, 'w') as f:
            json.dump(split_data, f, indent=2)

        logger.info(f"Saved split mapping to {output_path}")

    def load_split(self, split_path: str) -> Dict[str, Dict[str, List[str]]]:
        """Load split mapping from JSON."""
        with open(split_path, 'r') as f:
            split_data = json.load(f)

        logger.info(f"Loaded split mapping from {split_path}")
        return split_data['splits']

    def _count_docs(self, split: Dict[str, List[str]]) -> int:
        """Count total documents in a split."""
        return sum(len(docs) for docs in split.values())

    def _get_statistics(self, split: Dict[str, List[str]]) -> Dict[str, int]:
        """Get statistics for a split."""
        stats = {'total': self._count_docs(split)}
        for doc_type, docs in split.items():
            stats[doc_type] = len(docs)
        return stats
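To close, a brief illustrative sketch (not part of the committed files) of how SplitManager is typically used end to end. `documents_by_type` is a placeholder mapping of `{doc_type: [DocumentAsset, ...]}` — create_split() only reads `.doc_name` from each asset — and the JSON path is hypothetical.

# Illustrative end-to-end use of SplitManager; the output path is hypothetical.
manager = SplitManager(
    train_ratio=0.6,
    test_ratio=0.25,
    validation_ratio=0.15,
    random_seed=42,
)

splits = manager.create_split(documents_by_type)
manager.save_split(splits, "data/splits/split_mapping.json")

# A later run can reproduce the exact same document-level split:
splits = manager.load_split("data/splits/split_mapping.json")
train_docs_by_type = splits["train"]    # {doc_type: [doc_name, ...]}

Because train_end and test_end are computed with int() truncation, any remainder falls into the validation split: for example, 10 documents of a type under the default 0.6/0.25/0.15 ratios yield 6 train, 2 test, and 2 validation names.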