import json
import os

from PIL import Image

import datasets
from datasets import load_dataset


def load_image(image_path):
    image = Image.open(image_path).convert("RGB")
    w, h = image.size
    return image, (w, h)


def normalize_bbox(bbox, size):
    # Scale a pixel-space box to the 0-1000 coordinate range expected by
    # LayoutLM-style models.
    return [
        int(1000 * bbox[0] / size[0]),
        int(1000 * bbox[1] / size[1]),
        int(1000 * bbox[2] / size[0]),
        int(1000 * bbox[3] / size[1]),
    ]


logger = datasets.logging.get_logger(__name__)


_CITATION = """\
"""

_DESCRIPTION = """\
"""


class FunsdConfig(datasets.BuilderConfig):
    """BuilderConfig for FUNSD."""

    def __init__(self, **kwargs):
        """BuilderConfig for FUNSD.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(FunsdConfig, self).__init__(**kwargs)


class Funsd(datasets.GeneratorBasedBuilder):
    """FUNSD-style token classification dataset."""

    BUILDER_CONFIGS = [
        FunsdConfig(name="funsd", version=datasets.Version("1.0.0"), description="FUNSD dataset"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                # "O" is required: words labeled "other" are tagged "O" in _generate_examples.
                                "O",
                                "S-ANSWER_EXP",
                                "S-ANSWER_FECHA_SERVICIO",
                                "S-ANSWER_HORA_SERVICIO",
                                "S-ANSWER_SALA",
                                "B-ANSWER_NOMBRE1",
                                "I-ANSWER_NOMBRE1",
                                "E-ANSWER_NOMBRE1",
                                "B-ANSWER_DIRECCION",
                                "I-ANSWER_DIRECCION",
                                "E-ANSWER_DIRECCION",
                                "B-ANSWER_POBLACION",
                                "I-ANSWER_POBLACION",
                                "E-ANSWER_POBLACION",
                                "S-ANSWER_DNI",
                                "S-ANSWER_TELEFONO",
                                "S-ANSWER_EDAD",
                                "S-ANSWER_NACIMIENTO_DIF",
                                "S-ANSWER_ESTADO_CIVIL_DIF",
                                "S-ANSWER_FECHA_DEF",
                                "B-ANSWER_LUGAR_DEF",
                                "I-ANSWER_LUGAR_DEF",
                                "E-ANSWER_LUGAR_DEF",
                                "S-ANSWER_NATURAL_DE_DIF",
                                "B-ANSWER_PADRES_DIF",
                                "I-ANSWER_PADRES_DIF",
                                "E-ANSWER_PADRES_DIF",
                                "B-ANSWER_NOMBRE_TITULAR",
                                "I-ANSWER_NOMBRE_TITULAR",
                                "E-ANSWER_NOMBRE_TITULAR",
                                "S-ANSWER_AUT_DNI_TITULAR",
                                "B-ANSWER_DIRECCION_TITULAR",
                                "I-ANSWER_DIRECCION_TITULAR",
                                "E-ANSWER_DIRECCION_TITULAR",
                                "B-ANSWER_POBLACION_TITULAR",
                                "I-ANSWER_POBLACION_TITULAR",
                                "E-ANSWER_POBLACION_TITULAR",
                                "B-ANSWER_AUTORIZACION_TITULAR",
                                "I-ANSWER_AUTORIZACION_TITULAR",
                                "E-ANSWER_AUTORIZACION_TITULAR",
                                "S-ANSWER_DNI_TITULAR",
                                "S-ANSWER_HORA_DEFUNCION",
                                "B-ANSWER_DESCRIPCION",
                                "I-ANSWER_DESCRIPCION",
                                "E-ANSWER_DESCRIPCION",
                                "B-ANSWER_NOMBRE",
                                "I-ANSWER_NOMBRE",
                                "E-ANSWER_NOMBRE",
                                "S-ANSWER_CANTIDAD",
                                "S-ANSWER_IMPORTE",
                            ]
                        )
                    ),
                    "image": datasets.features.Image(),
                }
            ),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        downloaded_file = dl_manager.download_and_extract(
            "https://huggingface.co/datasets/LauraExp/LILT2/resolve/main/Data.zip"
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": f"{downloaded_file}/Data/training_data/"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": f"{downloaded_file}/Data/testing_data/"},
            ),
        ]

    def get_line_bbox(self, bboxs):
        # Merge the word-level boxes of one line into a single enclosing box,
        # repeated once per word.
        x = [bboxs[i][j] for i in range(len(bboxs)) for j in range(0, len(bboxs[i]), 2)]
        y = [bboxs[i][j] for i in range(len(bboxs)) for j in range(1, len(bboxs[i]), 2)]

        x0, y0, x1, y1 = min(x), min(y), max(x), max(y)

        assert x1 >= x0 and y1 >= y0
        bbox = [[x0, y0, x1, y1] for _ in range(len(bboxs))]
        return bbox

    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        ann_dir = os.path.join(filepath, "annotations")
        img_dir = os.path.join(filepath, "images")
        for guid, file in enumerate(sorted(os.listdir(ann_dir))):
            tokens = []
            bboxes = []
            ner_tags = []

            file_path = os.path.join(ann_dir, file)
            with open(file_path, "r", encoding="utf8") as f:
                data = json.load(f)
            image_path = os.path.join(img_dir, file)
            image_path = image_path.replace("json", "png")
            image, size = load_image(image_path)
            for item in data["form"]:
                words_example, label = item["words"], item["label"]
                words_example = [w for w in words_example if w["text"].strip() != ""]
                if len(words_example) == 0:
                    continue
                if label == "other":
                    for w in words_example:
                        tokens.append(w["text"])
                        ner_tags.append("O")
                        bboxes.append(normalize_bbox(w["box"], size))
                else:
                    if len(words_example) == 1:
                        # Single-word entity: tag with S-.
                        tokens.append(words_example[0]["text"])
                        ner_tags.append("S-" + label.upper())
                        bboxes.append(normalize_bbox(words_example[0]["box"], size))
                    else:
                        # Multi-word entity: B- for the first word, I- for the
                        # inner words, E- for the last word. Iterating over
                        # words_example[1:-1] keeps the last word from being
                        # emitted twice (once as I- and once as E-).
                        tokens.append(words_example[0]["text"])
                        ner_tags.append("B-" + label.upper())
                        bboxes.append(normalize_bbox(words_example[0]["box"], size))
                        for w in words_example[1:-1]:
                            tokens.append(w["text"])
                            ner_tags.append("I-" + label.upper())
                            bboxes.append(normalize_bbox(w["box"], size))
                        tokens.append(words_example[-1]["text"])
                        ner_tags.append("E-" + label.upper())
                        bboxes.append(normalize_bbox(words_example[-1]["box"], size))
            yield guid, {
                "id": str(guid),
                "tokens": tokens,
                "bboxes": bboxes,
                "ner_tags": ner_tags,
                "image": image,
            }
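

# Usage sketch (illustrative, not part of the builder itself): how a loading
# script like this one is typically consumed through `datasets.load_dataset`.
# The path "path/to/this_script.py" is a placeholder for wherever this file
# actually lives; depending on your `datasets` version, loading a script may
# also require passing `trust_remote_code=True`.
if __name__ == "__main__":
    dataset = load_dataset("path/to/this_script.py", name="funsd")
    print(dataset)

    # Peek at the first training example to check tokens, tags, and boxes line up.
    example = dataset["train"][0]
    print(example["tokens"][:10])
    print(example["ner_tags"][:10])
    print(example["bboxes"][:10])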