LauraExp commited on
Commit
1c6463b
1 Parent(s): 02f8b4e

Upload LILT2.py

Browse files
Files changed (1) hide show
  1. LILT2.py +191 -0
LILT2.py ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+
4
+ from PIL import Image
5
+
6
+ import datasets
7
+ from datasets import load_dataset
8
+
9
def load_image(image_path):
    """Open an image file, force RGB mode, and return it with its pixel size.

    Args:
        image_path: filesystem path to the image.

    Returns:
        tuple: ``(PIL.Image.Image, (width, height))``.
    """
    img = Image.open(image_path).convert("RGB")
    return img, img.size
13
+
14
def normalize_bbox(bbox, size):
    """Scale an absolute pixel box ``[x0, y0, x1, y1]`` into the 0-1000 space
    used by LayoutLM-family models.

    Args:
        bbox: box in absolute pixel coordinates.
        size: ``(width, height)`` of the page image.

    Returns:
        list[int]: box coordinates normalized to the 0-1000 grid (truncated).
    """
    width, height = size
    x0, y0, x1, y1 = bbox
    return [
        int(1000 * x0 / width),
        int(1000 * y0 / height),
        int(1000 * x1 / width),
        int(1000 * y1 / height),
    ]
21
+
22
# Module-level logger, following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)


# NOTE(review): citation template is incomplete — it contains only a stray
# closing brace from a BibTeX skeleton; fill in or remove before release.
_CITATION = """\

}
"""

# NOTE(review): dataset description was left empty; the dataset card will
# show a blank description until this is filled in.
_DESCRIPTION = """\

"""
33
+
34
+
35
class FunsdConfig(datasets.BuilderConfig):
    """BuilderConfig for the FUNSD-style dataset."""

    def __init__(self, **kwargs):
        """Create a FUNSD builder configuration.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
44
+
45
+
46
class Funsd(datasets.GeneratorBasedBuilder):
    """FUNSD-style dataset builder producing tokens, normalized bboxes,
    BIOES NER tags and the page image for each annotated form.
    """

    BUILDER_CONFIGS = [
        FunsdConfig(name="funsd", version=datasets.Version("1.0.0"), description="FUNSD dataset"),
    ]

    def _info(self):
        """Declare the feature schema: word tokens, one 0-1000-normalized
        bbox per token, a BIOES tag per token, and the page image.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            # BUGFIX: "O" added — _generate_examples emits "O"
                            # for "other"-labeled items, and ClassLabel raises
                            # on unknown names at encoding time.
                            names=[
                                "O",
                                "S-ANSWER_EXP",
                                "S-ANSWER_FECHA_SERVICIO",
                                "S-ANSWER_HORA_SERVICIO",
                                "S-ANSWER_SALA",
                                "B-ANSWER_NOMBRE1",
                                "I-ANSWER_NOMBRE1",
                                "E-ANSWER_NOMBRE1",
                                "B-ANSWER_DIRECCION",
                                "I-ANSWER_DIRECCION",
                                "E-ANSWER_DIRECCION",
                                "B-ANSWER_POBLACION",
                                "I-ANSWER_POBLACION",
                                "E-ANSWER_POBLACION",
                                "S-ANSWER_DNI",
                                "S-ANSWER_TELEFONO",
                                "S-ANSWER_EDAD",
                                "S-ANSWER_NACIMIENTO_DIF",
                                "S-ANSWER_ESTADO_CIVIL_DIF",
                                "S-ANSWER_FECHA_DEF",
                                "B-ANSWER_LUGAR_DEF",
                                "I-ANSWER_LUGAR_DEF",
                                "E-ANSWER_LUGAR_DEF",
                                "S-ANSWER_NATURAL_DE_DIF",
                                "B-ANSWER_PADRES_DIF",
                                "I-ANSWER_PADRES_DIF",
                                "E-ANSWER_PADRES_DIF",
                                "B-ANSWER_NOMBRE_TITULAR",
                                "I-ANSWER_NOMBRE_TITULAR",
                                "E-ANSWER_NOMBRE_TITULAR",
                                "S-ANSWER_AUT_DNI_TITULAR",
                                "B-ANSWER_DIRECCION_TITULAR",
                                "I-ANSWER_DIRECCION_TITULAR",
                                "E-ANSWER_DIRECCION_TITULAR",
                                "B-ANSWER_POBLACION_TITULAR",
                                "I-ANSWER_POBLACION_TITULAR",
                                "E-ANSWER_POBLACION_TITULAR",
                                "B-ANSWER_AUTORIZACION_TITULAR",
                                "I-ANSWER_AUTORIZACION_TITULAR",
                                "E-ANSWER_AUTORIZACION_TITULAR",
                                "S-ANSWER_DNI_TITULAR",
                                "S-ANSWER_HORA_DEFUNCION",
                                "B-ANSWER_DESCRIPCION",
                                "I-ANSWER_DESCRIPCION",
                                "E-ANSWER_DESCRIPCION",
                                "B-ANSWER_NOMBRE",
                                "I-ANSWER_NOMBRE",
                                "E-ANSWER_NOMBRE",
                                "S-ANSWER_CANTIDAD",
                                "S-ANSWER_IMPORTE",
                            ]
                        )
                    ),
                    "image": datasets.features.Image(),
                }
            ),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive, then return train/test splits."""
        downloaded_file = dl_manager.download_and_extract(
            "https://huggingface.co/datasets/LauraExp/LILT2/resolve/main/Data.zip"
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": f"{downloaded_file}/Data/training_data/"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": f"{downloaded_file}/Data/testing_data/"},
            ),
        ]

    def get_line_bbox(self, bboxs):
        """Return, for each input box, the bounding box enclosing all boxes.

        Even indices of each box are x coordinates, odd indices y coordinates.

        Args:
            bboxs: non-empty list of coordinate lists (e.g. ``[x0, y0, x1, y1]``).

        Returns:
            list[list[int]]: one copy of the enclosing ``[x0, y0, x1, y1]``
            per input box, so it can be zipped 1:1 with the inputs.
        """
        xs = [box[j] for box in bboxs for j in range(0, len(box), 2)]
        ys = [box[j] for box in bboxs for j in range(1, len(box), 2)]

        x0, y0, x1, y1 = min(xs), min(ys), max(xs), max(ys)

        assert x1 >= x0 and y1 >= y0
        return [[x0, y0, x1, y1] for _ in bboxs]

    def _generate_examples(self, filepath):
        """Yield ``(guid, example)`` pairs from annotation JSONs and images.

        Expects ``filepath`` to contain ``annotations/*.json`` and a parallel
        ``images/*.png`` sharing each annotation's basename.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        ann_dir = os.path.join(filepath, "annotations")
        img_dir = os.path.join(filepath, "images")
        for guid, file in enumerate(sorted(os.listdir(ann_dir))):
            tokens = []
            bboxes = []
            ner_tags = []

            file_path = os.path.join(ann_dir, file)
            with open(file_path, "r", encoding="utf8") as f:
                data = json.load(f)
            # BUGFIX: was image_path.replace("json", "png"), which also
            # rewrites "json" anywhere in the directory path; swap only the
            # extension instead.
            image_path = os.path.splitext(os.path.join(img_dir, file))[0] + ".png"
            image, size = load_image(image_path)
            for item in data["form"]:
                words_example, label = item["words"], item["label"]
                words_example = [w for w in words_example if w["text"].strip() != ""]
                if len(words_example) == 0:
                    continue
                if label == "other":
                    for w in words_example:
                        tokens.append(w["text"])
                        ner_tags.append("O")
                        bboxes.append(normalize_bbox(w["box"], size))
                elif len(words_example) == 1:
                    # Single-word entity -> S- (single) tag.
                    tokens.append(words_example[0]["text"])
                    ner_tags.append("S-" + label.upper())
                    bboxes.append(normalize_bbox(words_example[0]["box"], size))
                else:
                    # Multi-word entity -> B- first, I- middle, E- last.
                    tokens.append(words_example[0]["text"])
                    ner_tags.append("B-" + label.upper())
                    bboxes.append(normalize_bbox(words_example[0]["box"], size))
                    # BUGFIX: was words_example[1:], which also emitted the
                    # last word with an I- tag and then appended it again
                    # below with E-, duplicating the token and its bbox.
                    for w in words_example[1:-1]:
                        tokens.append(w["text"])
                        ner_tags.append("I-" + label.upper())
                        bboxes.append(normalize_bbox(w["box"], size))
                    tokens.append(words_example[-1]["text"])
                    ner_tags.append("E-" + label.upper())
                    bboxes.append(normalize_bbox(words_example[-1]["box"], size))
            # BUGFIX: removed two stray lines left over from the upstream
            # FUNSD script (`cur_line_bboxes = self.get_line_bbox(cur_line_bboxes)`
            # and `bboxes.extend(cur_line_bboxes)`); `cur_line_bboxes` was
            # never assigned, so the first example raised NameError.
            yield guid, {
                "id": str(guid),
                "tokens": tokens,
                "bboxes": bboxes,
                "ner_tags": ner_tags,
                "image": image,
            }
189
+
190
+
191
+