Keep only the largest layer
QUAERO.py CHANGED
@@ -13,12 +13,12 @@ logger = datasets.logging.get_logger(__name__)
 _CITATION = """
 @InProceedings{neveol14quaero,
   author = {Névéol, Aurélie and Grouin, Cyril and Leixa, Jeremy
+            and Rosset, Sophie and Zweigenbaum, Pierre},
   title = {The {QUAERO} {French} Medical Corpus: A Ressource for
+          Medical Entity Recognition and Normalization},
  OPTbooktitle = {Proceedings of the Fourth Workshop on Building
+                and Evaluating Ressources for Health and Biomedical
+                Text Processing},
  booktitle = {Proc of BioTextMining Work},
  OPTseries = {BioTxtM 2014},
  year = {2014},
@@ -52,225 +52,266 @@ BioCreative IV track 1 - BioC: The BioCreative Interoperability Initiative, 2013
 Please note that the original version of the QUAERO corpus distributed in the CLEF eHealth challenge 2015 and 2016 came in the BRAT stand alone format. It was distributed with the CLEF eHealth evaluation tool. This original distribution of the QUAERO French Medical corpus is available separately from https://quaerofrenchmed.limsi.fr
 All questions regarding the task or data should be addressed to aurelie.neveol@limsi.fr
 """
+
 _LABELS_BASE = ['DISO', 'DEVI', 'CHEM', 'GEOG', 'OBJC', 'PHEN', 'PHYS', 'LIVB', 'PROC', 'ANAT']
 
 class QUAERO(datasets.GeneratorBasedBuilder):
+    """QUAERO dataset."""
+
+    VERSION = datasets.Version("1.0.0")
+
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(name="emea", version=VERSION, description="The EMEA QUAERO corpora"),
+        datasets.BuilderConfig(name="medline", version=VERSION, description="The MEDLINE QUAERO corpora"),
+    ]
+
+    DEFAULT_CONFIG_NAME = "emea"
+
+    def _info(self):
+
+        if self.config.name == "emea":
+
+            return datasets.DatasetInfo(
+                description=_DESCRIPTION,
+                features=datasets.Features(
+                    {
+                        "id": datasets.Value("string"),
+                        "document_id": datasets.Value("string"),
+                        "tokens": datasets.Sequence(datasets.Value("string")),
+                        "ner_tags": datasets.Sequence(
+                            datasets.features.ClassLabel(
+                                names = ['O', 'B-LIVB', 'I-LIVB', 'B-PROC', 'I-PROC', 'B-ANAT', 'I-ANAT', 'B-DEVI', 'I-DEVI', 'B-CHEM', 'I-CHEM', 'B-GEOG', 'I-GEOG', 'B-PHYS', 'I-PHYS', 'B-PHEN', 'I-PHEN', 'B-DISO', 'I-DISO', 'B-OBJC', 'I-OBJC'],
+                            )
+                        ),
+                    }
+                ),
+                supervised_keys=None,
+                homepage="https://quaerofrenchmed.limsi.fr/",
+                citation=_CITATION,
+                license=_LICENSE,
+            )
+
+        elif self.config.name == "medline":
+            return datasets.DatasetInfo(
+                description=_DESCRIPTION,
+                features=datasets.Features(
+                    {
+                        "id": datasets.Value("string"),
+                        "document_id": datasets.Value("string"),
+                        "tokens": datasets.Sequence(datasets.Value("string")),
+                        "ner_tags": datasets.Sequence(
+                            datasets.features.ClassLabel(
+                                names = ['O', 'B-LIVB', 'I-LIVB', 'B-PROC', 'I-PROC', 'B-ANAT', 'I-ANAT', 'B-DEVI', 'I-DEVI', 'B-CHEM', 'I-CHEM', 'B-GEOG', 'I-GEOG', 'B-PHYS', 'I-PHYS', 'B-PHEN', 'I-PHEN', 'B-DISO', 'I-DISO', 'B-OBJC', 'I-OBJC'],
+                            )
+                        ),
+                    }
+                ),
+                supervised_keys=None,
+                homepage="https://quaerofrenchmed.limsi.fr/",
+                citation=_CITATION,
+                license=_LICENSE,
+            )
+
+    def _split_generators(self, dl_manager):
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "split": "train",
+                }
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "split": "validation",
+                }
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "split": "test",
+                }
+            ),
+        ]
+
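The `ner_tags` feature above is a `Sequence` of `ClassLabel`, so each IOB2 tag is stored as an integer index into the `names` list. A minimal sketch of that mapping, using an abbreviated `names` list for readability:

```python
# Minimal sketch of the ClassLabel string/integer mapping; the loader above
# declares the full 21-name list, abbreviated here for readability.
import datasets

ner_feature = datasets.ClassLabel(names=['O', 'B-LIVB', 'I-LIVB', 'B-PROC', 'I-PROC'])

print(ner_feature.str2int('B-PROC'))  # -> 3
print(ner_feature.int2str(0))         # -> 'O'
```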
+    def split_sentences(self, json_o):
+        """
+        Split the corpus into shorter sentences so that they fit into BERT-like models.
+
+        The split is done on the "." tokens.
+        """
+
+        final_json = []
+
+        for i in json_o:
+
+            ind_punc = [index for index, value in enumerate(i['tokens']) if value=='.'] + [len(i['tokens'])]
+            # ind_punc = [index for index, value in enumerate(i['tokens']) if value=='.' and not str(i['tokens'][index-1]).isnumeric()]
+
+            for index, value in enumerate(ind_punc):
+
+                if index==0:
+                    final_json.append({'id': i['id']+'_'+str(index),
+                                       'document_id': i['id']+'_'+str(index),
+                                       'ner_tags': i['ner_tags'][:value+1],
+                                       'tokens': i['tokens'][:value+1]
+                    })
+                else:
+                    prev_value = ind_punc[index-1]
+                    final_json.append({'id': i['id']+'_'+str(index),
+                                       'document_id': i['document_id']+'_'+str(index),
+                                       'ner_tags': i['ner_tags'][prev_value+1:value+1],
+                                       'tokens': i['tokens'][prev_value+1:value+1]
+                    })
+
+        return final_json
+
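To make the splitting convention concrete: each record is cut after every "." token, ids gain a fragment suffix, and the trailing sentinel index (`len(i['tokens'])`) means a document that already ends with "." also yields one final empty fragment. A toy illustration with made-up tokens:

```python
# Toy input for split_sentences (illustration only; not real QUAERO data).
doc = {
    'id': 'doc1',
    'document_id': 'doc1',
    'tokens':   ['Le', 'patient', 'dort', '.', 'Il', 'va', 'bien', '.'],
    'ner_tags': ['O',  'B-LIVB',  'O',    'O', 'O',  'O',  'O',    'O'],
}
# Expected fragments:
#   {'id': 'doc1_0', 'tokens': ['Le', 'patient', 'dort', '.'], ...}
#   {'id': 'doc1_1', 'tokens': ['Il', 'va', 'bien', '.'], ...}
#   {'id': 'doc1_2', 'tokens': [], ...}   <- empty tail from the sentinel index
```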
+    def convert_to_prodigy(self, json_object):
+
+        new_json = []
+
+        for ex in json_object:
+
+            tokenized_text = ex['text'].split()
+
+            list_spans = []
+
+            for a in ex['text_bound_annotations']:
+
+                for o in range(len(a['offsets'])):
+
+                    offset_start = a['offsets'][o][0]
+                    offset_end = a['offsets'][o][1]
+
+                    nb_tokens_annot = len(a['text'][o].split())
+
+                    nb_tokens_before_annot = len(ex['text'][:offset_start].split())
+                    nb_tokens_after_annot = len(ex['text'][offset_end:].split())
+
+                    token_start = nb_tokens_before_annot
+                    token_end = token_start + nb_tokens_annot - 1
+
+                    list_spans.append({
+                        'start': offset_start,
+                        'end': offset_end,
+                        'token_start': token_start,
+                        'token_end': token_end,
+                        'label': a['type'],
+                        'id': a['id'],
+                        'text': a['text'][o],
+                    })
+
+            res = {
+                'id': ex['id'],
+                'document_id': ex['document_id'],
+                'text': ex['text'],
+                'tokens': tokenized_text,
+                'spans': list_spans
+            }
+
+            new_json.append(res)
+
+        return new_json
+
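The offset-to-token mapping above assumes every annotation starts and ends on whitespace boundaries: the token index of a span is recovered by counting the whitespace-split tokens that precede its character offset. A small self-contained check of that arithmetic, on a made-up sentence and offsets:

```python
# Illustration of the offset-to-token arithmetic in convert_to_prodigy
# (made-up example; assumes spans align with whitespace token boundaries).
text = "Ulcère gastrique traité par oméprazole"
ann_text, offset_start, offset_end = "oméprazole", 28, 38

token_start = len(text[:offset_start].split())       # 4 tokens precede the span
token_end = token_start + len(ann_text.split()) - 1  # single-token annotation

print(text.split()[token_start:token_end + 1])       # -> ['oméprazole']
```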
+    def convert_to_hf_format(self, json_object, list_label):
+        """
+        The format accounts for multi-label annotations by concatenating the labels with "_".
+        """
+
+        dict_out = []
+
+        for i in json_object:
+
+            # Filter annotations to keep the longest annotated spans when there are nested annotations
+            selected_annotations = []
+
+            if 'spans' in i:
+
+                # print(len(i['spans']))
+
+                for idx_j, j in enumerate(i['spans']):
+
+                    len_j = int(j['end'])-int(j['start'])
+                    range_j = [l for l in range(int(j['start']),int(j['end']),1)]
+
+                    keep = True
+
+                    for idx_k, k in enumerate(i['spans'][idx_j+1:]):
+
+                        len_k = int(k['end'])-int(k['start'])
+                        range_k = [l for l in range(int(k['start']),int(k['end']),1)]
+
+                        inter = list(set(range_k).intersection(set(range_j)))
+                        if len(inter) > 0 and len_j < len_k:
+                            keep = False
+
+                    if keep:
+                        selected_annotations.append(j)
+
+            # Create a list of labels + id to separate the different annotations and prepare the IOB2 format
+            nb_tokens = len(i['tokens'])
+            ner_tags = ['O']*nb_tokens
+
+            for slct in selected_annotations:
+
+                for x in range(slct['token_start'], slct['token_end']+1, 1):
+
+                    if slct['label'] in list_label:
+
+                        if ner_tags[x] == 'O':
+                            ner_tags[x] = slct['label']+'-'+slct['id']
+
+            # Make IOB2 format
+            ner_tags_IOB2 = []
+            for idx_l, label in enumerate(ner_tags):
+                # print(label)
+
+                if label == 'O':
+                    ner_tags_IOB2.append('O')
+                else:
+                    current_label = label.split('-')[0]
+                    current_id = label.split('-')[1]
+                    if idx_l == 0:
+                        ner_tags_IOB2.append('B-'+current_label)
+                    elif current_label in ner_tags[idx_l-1]:
+                        if current_id == ner_tags[idx_l-1].split('-')[1]:
+                            ner_tags_IOB2.append('I-'+current_label)
+                        else:
+                            ner_tags_IOB2.append('B-'+current_label)
+                    else:
+                        ner_tags_IOB2.append('B-'+current_label)
+
+            # print(ner_tags_IOB2)
+            dict_out.append({
+                'id': i['id'],
+                'document_id': i['document_id'],
+                "ner_tags": ner_tags_IOB2,
+                "tokens": i['tokens'],
+            })
+
+        return dict_out
+
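The span filter above is what the commit title refers to: when annotations are nested, only the largest layer survives. Note that the loader compares each span only against the spans that follow it; the sketch below restates the same rule as an order-independent, all-pairs check on made-up spans:

```python
# Order-independent restatement of the "keep only the largest layer" filter
# (made-up spans; in the loader each span is only compared to later ones).
spans = [
    {'start': 0, 'end': 16, 'label': 'DISO', 'id': 'T1'},  # "Ulcère gastrique"
    {'start': 7, 'end': 16, 'label': 'ANAT', 'id': 'T2'},  # nested "gastrique"
]

kept = [
    j for j in spans
    if not any(
        max(j['start'], k['start']) < min(j['end'], k['end'])   # j and k overlap
        and (j['end'] - j['start']) < (k['end'] - k['start'])   # and j is shorter
        for k in spans if k is not j
    )
]

print([s['id'] for s in kept])  # -> ['T1']
```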
+    def _generate_examples(self, split):
+
+        ds = load_dataset("bigbio/quaero", f"quaero_{self.config.name}_source")[split]
+
+        if self.config.name == "emea":
+
+            ds = self.split_sentences(
+                self.convert_to_hf_format(
+                    self.convert_to_prodigy(ds),
+                    _LABELS_BASE,
+                )
+            )
+
+        else:
+
+            ds = self.convert_to_hf_format(
+                self.convert_to_prodigy(ds),
+                _LABELS_BASE,
+            )
+
+        for d in ds:
+            yield d["id"], d
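Once published to the Hub, the loader would be used as in the sketch below. The repository id "user/QUAERO" is a placeholder (the actual repo path is not shown in this diff); note that `_generate_examples` re-downloads the source annotations from `bigbio/quaero` rather than shipping data files:

```python
# Hypothetical usage sketch; "user/QUAERO" is a placeholder repository id.
from datasets import load_dataset

dataset = load_dataset("user/QUAERO", "emea")  # or "medline"

example = dataset["train"][0]
print(example["tokens"])
print(example["ner_tags"])  # integer ids; the ClassLabel feature maps them back to IOB2 strings
```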