'''
HuggingFace datasets loading script for a Brazilian legal-document layout dataset
(STF, STJ, TRF2, TJPB and TJMG rulings), adapted from the FUNSD loader.

Reference: https://huggingface.co/datasets/nielsr/funsd/blob/main/funsd.py
'''
|
import ast
import os
import random

import datasets
import pandas as pd
from pdf2image import convert_from_path
from PIL import Image
|
|
|
|
|
def load_image(image_path):
    """Open an image file and return it as RGB together with its (w, h) size."""
    image = Image.open(image_path).convert("RGB")
    w, h = image.size
    return image, (w, h)
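
# Note: load_image is carried over from the reference FUNSD script; it is not
# called below, since page images are rendered with pdf2image in
# _generate_examples instead.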
|
|
|
def normalize_bbox(bbox, size):
    """Scale an (x0, y0, x1, y1) pixel box to the 0-1000 grid used by LayoutLM-style models."""
    return [
        int(1000 * bbox[0] / size[0]),
        int(1000 * bbox[1] / size[1]),
        int(1000 * bbox[2] / size[0]),
        int(1000 * bbox[3] / size[1]),
    ]
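
# Example (hypothetical values): on a 762x1000 page, the pixel box
# [100, 200, 300, 400] normalizes to [131, 200, 393, 400].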
|
|
|
logger = datasets.logging.get_logger(__name__) |
|
|
|
|
|
_CITATION = """\
@article{Jaume2019FUNSDAD,
  title={FUNSD: A Dataset for Form Understanding in Noisy Scanned Documents},
  author={Guillaume Jaume and H. K. Ekenel and J. Thiran},
  journal={2019 International Conference on Document Analysis and Recognition Workshops (ICDARW)},
  year={2019},
  volume={2},
  pages={1-6}
}
"""

_DESCRIPTION = """\
https://guillaumejaume.github.io/FUNSD/
"""
|
|
|
|
|
class FunsdConfig(datasets.BuilderConfig):
    """BuilderConfig for FUNSD."""

    def __init__(self, **kwargs):
        """BuilderConfig for FUNSD.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
|
|
|
|
|
|
|
class Funsd(datasets.GeneratorBasedBuilder):
    """Brazilian legal-document layout dataset in FUNSD format."""

    BUILDER_CONFIGS = [
        FunsdConfig(name="funsd", version=datasets.Version("1.0.0"), description="FUNSD dataset"),
    ]
|
|
|
|
|
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "segment_class": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-PARTES", "I-PARTES",
                                "B-EMENTA", "I-EMENTA",
                                "B-ACORDAO", "I-ACORDAO",
                                "B-RELATORIO", "I-RELATORIO",
                                "B-VOTO", "I-VOTO",
                            ]
                        )
                    ),
                    "image": datasets.features.Image(),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )
|
|
|
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators: one trainmini and one dev split per court."""
        downloaded_file = dl_manager.download_and_extract("http://direitodigital.ufms.br:8000/direitodigital_dev.zip")
        courts = ["stf", "stj", "trf2", "tjpb", "tjmg"]
        return [
            datasets.SplitGenerator(
                name=datasets.NamedSplit(f"{split}_{court}"),
                gen_kwargs={"filepath": f"{downloaded_file}/{split}/{court}"},
            )
            for court in courts
            for split in ("trainmini", "dev")
        ]
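
    # The generators above produce split names of the form "<phase>_<court>",
    # e.g. "trainmini_stf", "dev_stf", "trainmini_stj", ..., "dev_tjmg".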
|
|
|
    def get_line_bbox(self, bboxs):
        """Compute the enclosing box of a line and assign it to every word on that line."""
        x = [bboxs[i][j] for i in range(len(bboxs)) for j in range(0, len(bboxs[i]), 2)]
        y = [bboxs[i][j] for i in range(len(bboxs)) for j in range(1, len(bboxs[i]), 2)]

        x0, y0, x1, y1 = min(x), min(y), max(x), max(y)

        assert x1 >= x0 and y1 >= y0
        bbox = [[x0, y0, x1, y1] for _ in range(len(bboxs))]
        return bbox
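
    # Example (hypothetical values): word boxes [[10, 20, 50, 30], [60, 18, 90, 32]]
    # yield [[10, 18, 90, 32], [10, 18, 90, 32]]: every word on the line gets the
    # same enclosing box.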
|
|
|
    def _generate_examples(self, filepath):
        """Walk `filepath` for .tsv annotation files, render the matching PDFs,
        and yield one example per annotated page."""
        guid = 0

        file_paths = [
            os.path.join(root, filename)
            for root, dirs, files in os.walk(filepath)
            for filename in files
            if filename.endswith('.tsv')
        ]
        # Shuffle the document order.
        random.shuffle(file_paths)

        for tsv_name in file_paths:
            # The source PDF lives in a parallel /pdfs/ tree mirroring /LayoutLM_dataset/.
            pdf_name = tsv_name.replace('/LayoutLM_dataset/', '/pdfs/').replace('.tsv', '.pdf')
            # Render all pages at a fixed 762x1000 size; boxes are normalized
            # against this size in get_form.
            pages_img = convert_from_path(pdf_name, size=(762, 1000), fmt="png")

            # keep_default_na=False keeps literal "None" labels as strings, so they can be
            # collapsed, together with the auxiliary section labels, into OUTROS ("other").
            dataframe = pd.read_csv(tsv_name, delimiter='\t', keep_default_na=False).replace(
                ["None", "SUMULA", "CERTIDAO_DE_JULGAMENTO", "AUTUACAO", "CERTIDAO", "EXTRATO_DE_ATA"],
                'OUTROS',
            )
            for page in dataframe['page'].unique():
                image, size = pages_img[page - 1], pages_img[page - 1].size

                data = dataframe[dataframe["page"] == page]
                form = []
                for _, row in data.iterrows():
                    # Each row's 'tokens' column holds a stringified list of word dicts.
                    tokens = []
                    for token in ast.literal_eval(row['tokens']):
                        tokens.append({
                            'box': [token['x'], token['y'],
                                    token['x'] + token['width'], token['y'] + token['height']],
                            'text': token['text'],
                        })
                    line_dict = {
                        'text': row['text'],
                        'box': [row['x'], row['y'], row['x'] + row['width'], row['y'] + row['height']],
                        'label': row['label'],
                        'words': tokens,
                    }
                    form.append(line_dict)
                yield from self.get_form(guid, image, size, form)
                guid += 1
|
|
|
|
|
    def get_form(self, guid, image, size, form):
        """Flatten one page's segments into parallel token/bbox/BIO-tag lists
        and yield the resulting example."""
        tokens = []
        bboxes = []
        segment_class = []

        for item in form:
            cur_line_bboxes = []
            words, label = item["words"], item["label"]
            words = [w for w in words if w["text"].strip() != ""]
            if len(words) == 0:
                continue
            if label == "OUTROS":
                # Background segments get the plain "O" tag.
                for w in words:
                    tokens.append(w["text"])
                    segment_class.append("O")
                    cur_line_bboxes.append(normalize_bbox(w["box"], size))
            else:
                # BIO tagging: the first word of a segment gets B-, the rest I-.
                tokens.append(words[0]["text"])
                segment_class.append("B-" + label.upper())
                cur_line_bboxes.append(normalize_bbox(words[0]["box"], size))
                for w in words[1:]:
                    tokens.append(w["text"])
                    segment_class.append("I-" + label.upper())
                    cur_line_bboxes.append(normalize_bbox(w["box"], size))
            # Replace per-word boxes with the segment's enclosing box (segment-level layout).
            cur_line_bboxes = self.get_line_bbox(cur_line_bboxes)
            bboxes.extend(cur_line_bboxes)
        yield guid, {
            "id": str(guid),
            "tokens": tokens,
            "bboxes": bboxes,
            "segment_class": segment_class,
            "image": image,
        }
|
|
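# Example of get_form's tagging (hypothetical values): a segment labeled
# "EMENTA" with words ["EMENTA:", "Recurso", "especial"] contributes
# segment_class ["B-EMENTA", "I-EMENTA", "I-EMENTA"], and all three words
# share the segment's enclosing bbox from get_line_bbox.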
|
def main():
    # Local smoke test: iterate examples directly, bypassing load_dataset.
    dataset = Funsd()
    for example in dataset._generate_examples('/home/marlon/LayoutLM_dataset/trainmini'):
        print(example)


if __name__ == '__main__':
    main()
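
# A minimal usage sketch (assumes this file is saved locally, e.g. as
# direitodigital.py, that the download URL above is reachable, and that the
# installed datasets version still supports script-based loading):
#
#   from datasets import load_dataset
#   ds = load_dataset("direitodigital.py", split="dev_stf")
#   print(ds[0]["tokens"][:5], ds[0]["segment_class"][:5])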