import json
import os
from pathlib import Path

import datasets
import pandas as pd
from PIL import Image

logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@article{layoutlmv3_cv_extraction,
  title={LayoutLMv3 for Key Information Extraction},
  author={Misa R&D Team},
  year={2022},
}
"""

_DESCRIPTION = """\
CV is a collection of resume (CV) images. For each image it contains a list of OCR tokens,
each with its bounding box, text, and class. The goal is to benchmark "key information
extraction": extracting key information from documents. https://arxiv.org/abs/2103.14470
"""


def load_image(image_path):
    """Open an image and return it together with its (width, height)."""
    image = Image.open(image_path)
    w, h = image.size
    return image, (w, h)


def normalize_bbox(bbox, size):
    """Scale an [x1, y1, x2, y2] box to the 0-1000 range expected by LayoutLM-style models."""
    return [
        int(1000 * bbox[0] / size[0]),
        int(1000 * bbox[1] / size[1]),
        int(1000 * bbox[2] / size[0]),
        int(1000 * bbox[3] / size[1]),
    ]


def _get_drive_url(url):
    """Build a direct-download URL (uc?id=...) from a Google Drive share link."""
    base_url = "https://drive.google.com/uc?id="
    split_url = url.split("/")
    return base_url + split_url[7]


_URLS = [
    _get_drive_url("https://drive.google.com/drive/u/1/folders/1BBW2WicUqfZ5ck50N1AYY0krJoSJJQly"),
]


class DatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the CV Extractions dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for the CV Extractions dataset.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(DatasetConfig, self).__init__(**kwargs)


class WildReceipt(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        DatasetConfig(name="CV Extractions", version=datasets.Version("1.0.0"), description="CV dataset"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "words": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "person_name", "dob_key", "dob_value", "gender_key", "gender_value",
                                "phonenumber_key", "phonenumber_value", "email_key", "email_value",
                                "address_key", "address_value", "socical_address_value",
                                "education", "education_name", "education_time",
                                "experience", "experience_name", "experience_time",
                                "information", "undefined",
                            ]
                        )
                    ),
                    "image_path": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
            homepage="",
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators. Reads the split files from the extracted `data1` directory."""
        downloaded_file = dl_manager.download_and_extract(_URLS)
        dest = Path(downloaded_file[0]) / "data1"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": dest / "train.txt", "dest": dest}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": dest / "test.txt", "dest": dest}
            ),
        ]

    def _generate_examples(self, filepath, dest):
        # class_list.txt holds one "id label" pair per line; build an id -> label-name mapping.
        df = pd.read_csv(dest / "class_list.txt", delimiter=r"\s+", header=None, engine="python")
        id2labels = dict(zip(df[0].tolist(), df[1].tolist()))
        logger.info("⏳ Generating examples from = %s", filepath)

        # Each line of train.txt / test.txt is a JSON record describing one image and its annotations.
        item_list = []
        with open(filepath, "r") as f:
            for line in f:
                item_list.append(line.rstrip("\n\r"))

        for guid, fname in enumerate(item_list):
            data = json.loads(fname)
            image_path = dest / data["file_name"]
            image, size = load_image(image_path)
            # Each annotation box is stored as an 8-value polygon; indices (6, 7) and (2, 3)
            # give two opposite corners, which form an axis-aligned [x1, y1, x2, y2] box.
            boxes = [[i["box"][6], i["box"][7], i["box"][2], i["box"][3]] for i in data["annotations"]]
            text = [i["text"] for i in data["annotations"]]
            label = [id2labels[i["label"]] for i in data["annotations"]]
            boxes = [normalize_bbox(box, size) for box in boxes]

            # Report images whose normalized coordinates fall outside the 0-1000 range.
            flag = 0
            for box in boxes:
                for coord in box:
                    if coord > 1000:
                        flag += 1
            if flag > 0:
                print(image_path)

            yield guid, {
                "id": str(guid),
                "words": text,
                "bboxes": boxes,
                "ner_tags": label,
                "image_path": str(image_path),
            }