import json
from pathlib import Path

import datasets
import pandas as pd
from PIL import Image

logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@article{LayoutLmv3 for CV extractions,
title={LayoutLmv3for Key Information Extraction},
author={MisaR&D Team},
year={2022},
}
"""
_DESCRIPTION = """\
CV is a collection of receipts. It contains, for each photo about cv personal, a list of OCRs - with the bounding box, text, and class. The goal is to benchmark "key information extraction" - extracting key information from documents
https://arxiv.org/abs/2103.14470
"""

def load_image(image_path):
    # Open an image and return it together with its (width, height) size.
    image = Image.open(image_path)
    w, h = image.size
    return image, (w, h)

def normalize_bbox(bbox, size):
    # Scale pixel coordinates [x0, y0, x1, y1] to the 0-1000 range expected by
    # LayoutLM-style models.
    return [
        int(1000 * bbox[0] / size[0]),
        int(1000 * bbox[1] / size[1]),
        int(1000 * bbox[2] / size[0]),
        int(1000 * bbox[3] / size[1]),
    ]
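
# A worked example with illustrative values: on a 500x1000 image, the pixel box
# [50, 100, 200, 300] maps to
#   [int(1000*50/500), int(1000*100/1000), int(1000*200/500), int(1000*300/1000)]
#   = [100, 100, 400, 300],
# so every coordinate lands on the same 0-1000 scale regardless of image size.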

def _get_drive_url(url):
    # Convert a Google Drive share link into a direct-download URL.
    base_url = 'https://drive.google.com/uc?id='
    split_url = url.split('/')
    return base_url + split_url[5]
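
# For example, splitting "https://drive.google.com/file/d/<FILE_ID>/" on "/"
# gives ['https:', '', 'drive.google.com', 'file', 'd', '<FILE_ID>', ''], so
# index 5 is the file id and the result is
# "https://drive.google.com/uc?id=<FILE_ID>".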

_URLS = [
    _get_drive_url("https://drive.google.com/file/d/11SRDeRKUr8XacB7tauiGjkw1PXDGFKUx/"),
    _get_drive_url("https://drive.google.com/file/d/1KdDBmGP96lFc7jv2Bf4eqrO121ST-TCh/"),
]

class DatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the CV key information extraction dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for the CV key information extraction dataset.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)

class WildReceipt(datasets.GeneratorBasedBuilder):
    # Builder adapted from the WildReceipt dataset loading script and
    # repurposed for CV (résumé) key information extraction; the class name is
    # kept from the original script.
    BUILDER_CONFIGS = [
        DatasetConfig(name="CV Extractions", version=datasets.Version("1.0.0"), description="CV dataset"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "words": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=['person_name', 'dob_key', 'dob_value', 'gender_key', 'gender_value', 'phonenumber_key', 'phonenumber_value', 'email_key', 'email_value', 'address_key', 'address_value', 'socical_address_value', 'education', 'education_name', 'education_time', 'experience', 'experience_name', 'experience_time', 'information', 'undefined']
                        )
                    ),
                    "image_path": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
            homepage="",
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators, reading from the locally downloaded and
        extracted archives."""
        downloaded_file = dl_manager.download_and_extract(_URLS)
        dest = Path(downloaded_file[0]) / 'data1'
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": dest / "train.txt", "dest": dest}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": dest / "test.txt", "dest": dest}
            ),
        ]
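
    # Expected layout of the first extracted archive, inferred from the paths
    # used in this script (an assumption, not verified against the archive):
    #   data1/
    #     train.txt        # one JSON record per line
    #     test.txt
    #     class_list.txt   # whitespace-separated "<label_id> <label_name>" pairs
    #     <image files referenced by each record's "file_name">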

    def _generate_examples(self, filepath, dest):
        # class_list.txt maps integer label ids to label names, one
        # whitespace-separated pair per line.
        df = pd.read_csv(dest / 'class_list.txt', sep=r'\s+', header=None)
        id2labels = dict(zip(df[0].tolist(), df[1].tolist()))
        logger.info("⏳ Generating examples from = %s", filepath)
        item_list = []
        with open(filepath, 'r') as f:
            for line in f:
                item_list.append(line.rstrip('\n\r'))
        for guid, fname in enumerate(item_list):
            data = json.loads(fname)
            image_path = dest / data['file_name']
            image, size = load_image(image_path)
            # Each annotation stores an 8-value quadrilateral; two opposite
            # corners (indices 6, 7 and 2, 3) form an axis-aligned box.
            boxes = [[i['box'][6], i['box'][7], i['box'][2], i['box'][3]] for i in data['annotations']]
            text = [i['text'] for i in data['annotations']]
            label = [id2labels[i['label']] for i in data['annotations']]
            boxes = [normalize_bbox(box, size) for box in boxes]
            # Sanity check: report any image whose normalized coordinates fall
            # outside the expected 0-1000 range.
            if any(coord > 1000 for box in boxes for coord in box):
                print(image_path)
            yield guid, {"id": str(guid), "words": text, "bboxes": boxes, "ner_tags": label, "image_path": str(image_path)}
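

if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the original loader): build the
    # dataset from this script file and print the first training example.
    ds = datasets.load_dataset(__file__)
    print(ds["train"][0])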