Files changed (1)
  1. DocLayNet-base.py +53 -48
DocLayNet-base.py CHANGED
@@ -28,6 +28,7 @@ import os
 # import base64
 from PIL import Image
 import datasets
+from concurrent.futures import ThreadPoolExecutor, as_completed
 
 # Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
@@ -190,51 +191,55 @@ class DocLayNet(datasets.GeneratorBasedBuilder):
 
 
     def _generate_examples(self, filepath, split):
-        logger.info("⏳ Generating examples from = %s", filepath)
-        ann_dir = os.path.join(filepath, "annotations")
-        img_dir = os.path.join(filepath, "images")
-        # pdf_dir = os.path.join(filepath, "pdfs")
-
-        for guid, file in enumerate(sorted(os.listdir(ann_dir))):
-            texts = []
-            bboxes_block = []
-            bboxes_line = []
-            categories = []
-
-            # get json
-            file_path = os.path.join(ann_dir, file)
-            with open(file_path, "r", encoding="utf8") as f:
-                data = json.load(f)
-
-            # get image
-            image_path = os.path.join(img_dir, file)
-            image_path = image_path.replace("json", "png")
-            image, size = load_image(image_path)
-
-            # # get pdf
-            # pdf_path = os.path.join(pdf_dir, file)
-            # pdf_path = pdf_path.replace("json", "pdf")
-            # with open(pdf_path, "rb") as pdf_file:
-            # pdf_bytes = pdf_file.read()
-            # pdf_encoded_string = base64.b64encode(pdf_bytes)
-
-            for item in data["form"]:
-                text_example, category_example, bbox_block_example, bbox_line_example = item["text"], item["category"], item["box"], item["box_line"]
-                texts.append(text_example)
-                categories.append(category_example)
-                bboxes_block.append(bbox_block_example)
-                bboxes_line.append(bbox_line_example)
-
-            # get all metadadata
-            page_hash = data["metadata"]["page_hash"]
-            original_filename = data["metadata"]["original_filename"]
-            page_no = data["metadata"]["page_no"]
-            num_pages = data["metadata"]["num_pages"]
-            original_width = data["metadata"]["original_width"]
-            original_height = data["metadata"]["original_height"]
-            coco_width = data["metadata"]["coco_width"]
-            coco_height = data["metadata"]["coco_height"]
-            collection = data["metadata"]["collection"]
-            doc_category = data["metadata"]["doc_category"]
-
-            yield guid, {"id": str(guid), "texts": texts, "bboxes_block": bboxes_block, "bboxes_line": bboxes_line, "categories": categories, "image": image, "page_hash": page_hash, "original_filename": original_filename, "page_no": page_no, "num_pages": num_pages, "original_width": original_width, "original_height": original_height, "coco_width": coco_width, "coco_height": coco_height, "collection": collection, "doc_category": doc_category}
+        logger.info("⏳ Generating examples from = %s", filepath)
+        ann_dir = os.path.join(filepath, "annotations")
+        img_dir = os.path.join(filepath, "images")
+
+        def process_file(file):
+            texts = []
+            bboxes_block = []
+            bboxes_line = []
+            categories = []
+
+            file_path = os.path.join(ann_dir, file)
+            with open(file_path, "r", encoding="utf8") as f:
+                data = json.load(f)
+
+            image_path = os.path.join(img_dir, file.replace("json", "png"))
+            image, size = load_image(image_path)
+
+            for item in data["form"]:
+                text_example, category_example, bbox_block_example, bbox_line_example = item["text"], item["category"], item["box"], item["box_line"]
+                texts.append(text_example)
+                categories.append(category_example)
+                bboxes_block.append(bbox_block_example)
+                bboxes_line.append(bbox_line_example)
+
+            metadata = data["metadata"]
+            return {
+                "texts": texts,
+                "bboxes_block": bboxes_block,
+                "bboxes_line": bboxes_line,
+                "categories": categories,
+                "image": image,
+                "page_hash": metadata["page_hash"],
+                "original_filename": metadata["original_filename"],
+                "page_no": metadata["page_no"],
+                "num_pages": metadata["num_pages"],
+                "original_width": metadata["original_width"],
+                "original_height": metadata["original_height"],
+                "coco_width": metadata["coco_width"],
+                "coco_height": metadata["coco_height"],
+                "collection": metadata["collection"],
+                "doc_category": metadata["doc_category"]
+            }
+
+        with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
+            future_to_file = {executor.submit(process_file, file): file for file in sorted(os.listdir(ann_dir))}
+            for guid, future in enumerate(as_completed(future_to_file)):
+                file = future_to_file[future]
+                try:
+                    result = future.result()
+                    yield guid, {"id": str(guid), **result}
+                except Exception as exc:
+                    logger.error(f"Error processing {file}: {exc}")
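The change above swaps the sequential per-file loop for a ThreadPoolExecutor, yielding examples in completion order (via as_completed) rather than sorted-filename order. Below is a minimal, standalone sketch of that same pattern, not the dataset script itself: the parse_annotation helper and the data/annotations path are hypothetical stand-ins for process_file and ann_dir, assuming a directory of JSON annotation files.

import json
import os
from concurrent.futures import ThreadPoolExecutor, as_completed

def parse_annotation(path):
    # Hypothetical stand-in for process_file(): load one JSON annotation file.
    with open(path, "r", encoding="utf8") as f:
        return json.load(f)

ann_dir = "data/annotations"  # hypothetical directory of *.json files
files = sorted(os.listdir(ann_dir))

with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
    # Submit every file up front, then consume results as they finish;
    # enumerate(as_completed(...)) numbers examples in completion order,
    # which mirrors how the diff assigns guid.
    future_to_file = {executor.submit(parse_annotation, os.path.join(ann_dir, f)): f for f in files}
    for guid, future in enumerate(as_completed(future_to_file)):
        name = future_to_file[future]
        try:
            record = future.result()
        except Exception as exc:
            print(f"Error processing {name}: {exc}")
        else:
            print(guid, name, len(record))

Because reading the JSON and PNG files is I/O-bound, threads can overlap the waits even under the GIL; the trade-off is that example ids no longer correspond to a fixed file order across runs.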