gabrielaltay committed · Commit 41a7e13 · 1 Parent(s): 7cafdf8
upload hubscripts/muchmore_hub.py to hub from bigbio repo

muchmore.py · ADDED · +738 -0
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
A dataset loader for the MuchMore Springer Bilingual Corpus

homepage

* https://muchmore.dfki.de/resources1.htm

description of annotation format

* https://muchmore.dfki.de/pubs/D4.1.pdf

Four files are distributed

* springer_english_train_plain.tar.gz (english plain text of abstracts)
* springer_german_train_plain.tar.gz (german plain text of abstracts)
* springer_english_train_V4.2.tar.gz (annotated xml in english)
* springer_german_train_V4.2.tar.gz (annotated xml in german)

Each tar file has one member file per abstract.
There are keys to join the english and german files
but there is not a 1-1 mapping between them (i.e. some
english files have no german counterpart and some german
files have no english counterpart). However, there is a 1-1
mapping between plain text and annotations for a given language
(i.e. an abstract in springer_english_train_plain.tar.gz will
also be found in springer_english_train_V4.2.tar.gz)

Counts

* 15,631 total abstracts
* 7,823 english abstracts
* 7,808 german abstracts
* 6,374 matched (en/de) abstracts
* 1,449 english abstracts with no german
* 1,434 german abstracts with no english

Notes

* Arthroskopie.00130237.eng.abstr.chunkmorph.annotated.xml seems to be empty

* entity spans can overlap. an example from the first sample:

  {'id': 'Arthroskopie.00130003.eng.abstr-s1-t1',
   'type': 'umlsterm',
   'text': ['posterior'],
   'offsets': [[4, 13]],
   'normalized': [{'db_name': 'UMLS', 'db_id': 'C0032009'}]},
  {'id': 'Arthroskopie.00130003.eng.abstr-s1-t8',
   'type': 'umlsterm',
   'text': ['posterior cruciate ligament'],
   'offsets': [[4, 31]],
   'normalized': [{'db_name': 'UMLS', 'db_id': 'C0080039'}]},
  {'id': 'Arthroskopie.00130003.eng.abstr-s1-t2',
   'type': 'umlsterm',
   'text': ['ligament'],
   'offsets': [[23, 31]],
   'normalized': [{'db_name': 'UMLS', 'db_id': 'C0023685'},
                  {'db_name': 'UMLS', 'db_id': 'C0023686'}]},

* semantic relations are defined between concepts, but entities can
  have multiple concepts associated with them. in the bigbio
  schema we skip relations between multiple concepts of the
  same entity. an example of a relation that is kept from the
  source schema is below,

In [35]: dsd['train'][0]['sentences'][0]['tokens']
Out[35]:
[{'id': 'w1', 'pos': 'DT', 'lemma': 'the', 'text': 'The'},
 {'id': 'w2', 'pos': 'JJ', 'lemma': 'posterior', 'text': 'posterior'},
 {'id': 'w3', 'pos': 'JJ', 'lemma': 'cruciate', 'text': 'cruciate'},
 {'id': 'w4', 'pos': 'NN', 'lemma': 'ligament', 'text': 'ligament'},
 {'id': 'w5', 'pos': 'PUNCT', 'lemma': None, 'text': '('},
 {'id': 'w6', 'pos': 'NN', 'lemma': None, 'text': 'PCL'},
 {'id': 'w7', 'pos': 'PUNCT', 'lemma': None, 'text': ')'},
 {'id': 'w8', 'pos': 'VBZ', 'lemma': 'be', 'text': 'is'},
 {'id': 'w9', 'pos': 'DT', 'lemma': 'the', 'text': 'the'},
 {'id': 'w10', 'pos': 'JJS', 'lemma': 'strong', 'text': 'strongest'},
 {'id': 'w11', 'pos': 'NN', 'lemma': 'ligament', 'text': 'ligament'},
 {'id': 'w12', 'pos': 'IN', 'lemma': 'of', 'text': 'of'},
 {'id': 'w13', 'pos': 'DT', 'lemma': 'the', 'text': 'the'},
 {'id': 'w14', 'pos': 'JJ', 'lemma': 'human', 'text': 'human'},
 {'id': 'w15', 'pos': 'NN', 'lemma': 'knee', 'text': 'knee'},
 {'id': 'w16', 'pos': 'JJ', 'lemma': 'joint', 'text': 'joint'},
 {'id': 'w17', 'pos': 'PUNCT', 'lemma': None, 'text': '.'}]

In [36]: dsd['train'][0]['sentences'][0]['semrels'][0]
Out[36]: {'id': 'r1', 'term1': 't3.1', 'term2': 't6.1', 'reltype': 'surrounds'}

In [37]: dsd['train'][0]['sentences'][0]['umlsterms'][2]
Out[37]:
{'id': 't3',
 'from': 'w11',
 'to': 'w11',
 'concepts': [{'id': 't3.1',
   'cui': 'C0023685',
   'preferred': 'Ligaments',
   'tui': 'T024',
   'mshs': [{'code': 'A2.513'}]},
  {'id': 't3.2',
   'cui': 'C0023686',
   'preferred': 'Articular ligaments',
   'tui': 'T023',
   'mshs': [{'code': 'A2.513.514'}, {'code': 'A2.835.583.512'}]}]}

In [38]: dsd['train'][0]['sentences'][0]['umlsterms'][5]
Out[38]:
{'id': 't6',
 'from': 'w16',
 'to': 'w16',
 'concepts': [{'id': 't6.1',
   'cui': 'C0022417',
   'preferred': 'Joints',
   'tui': 'T030',
   'mshs': [{'code': 'A2.835.583'}]}]}

"""

import itertools
import os
import re
import tarfile
import xml.etree.ElementTree as ET
from collections import defaultdict
from typing import Dict, List
from xml.etree.ElementTree import Element

import datasets
from datasets import Features, Value

# TODO: the home page has a list of publications but it's not clear which to choose
# https://muchmore.dfki.de/papers1.htm
# to start, we chose the one below.
# Buitelaar, Paul / Declerck, Thierry / Sacaleanu, Bogdan / Vintar, Spela / Raileanu, Diana / Crispi, Claudia: A Multi-Layered, XML-Based Approach to the Integration of Linguistic and Semantic Annotations. In: Proceedings of EACL 2003 Workshop on Language Technology and the Semantic Web (NLPXML'03), Budapest, Hungary, April 2003.
from .bigbiohub import kb_features
# text2text_features is referenced in _info for the bigbio_t2t schema
# but was missing from the imports
from .bigbiohub import text2text_features
from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks

_LANGUAGES = ['English', 'German']
_PUBMED = True
_LOCAL = False
_CITATION = """\
@inproceedings{buitelaar2003multi,
  title={A multi-layered, xml-based approach to the integration of linguistic and semantic annotations},
  author={Buitelaar, Paul and Declerck, Thierry and Sacaleanu, Bogdan and Vintar, {\\v{S}}pela and Raileanu, Diana and Crispi, Claudia},
  booktitle={Proceedings of EACL 2003 Workshop on Language Technology and the Semantic Web (NLPXML'03), Budapest, Hungary},
  year={2003}
}
"""

_DESCRIPTION = """\
The corpus used in the MuchMore project is a parallel corpus of English-German scientific
medical abstracts obtained from the Springer Link web site. The corpus consists of
approximately 1 million tokens for each language. Abstracts are from 41 medical
journals, each of which constitutes a relatively homogeneous medical sub-domain (e.g.
Neurology, Radiology, etc.). The corpus of downloaded HTML documents is normalized in
various ways, in order to produce a clean, plain text version, consisting of a title, abstract
and keywords. Additionally, the corpus was aligned on the sentence level.

Automatic (!) annotation includes: Part-of-Speech; Morphology (inflection and
decomposition); Chunks; Semantic Classes (UMLS: Unified Medical Language System,
MeSH: Medical Subject Headings, EuroWordNet); Semantic Relations from UMLS.
"""

_DATASETNAME = "muchmore"
_DISPLAYNAME = "MuchMore"

_HOMEPAGE = "https://muchmore.dfki.de/resources1.htm"

# TODO: the website says the following, but we don't see a specific license
# TODO: add to FAQs about what to do in this situation.

# "The cross-lingual information access prototype system for the medical domain
# will be made publicly accessible through the internet. It provides access to
# multilingual information on the basis of a domain ontology and classification.
# For the main task of multilingual domain modelling, the project will focus
# on German and English. "
_LICENSE = 'License information unavailable'
_URLs = {
    "muchmore_source": [
        "https://muchmore.dfki.de/pubs/springer_english_train_plain.tar.gz",
        "https://muchmore.dfki.de/pubs/springer_english_train_V4.2.tar.gz",
        "https://muchmore.dfki.de/pubs/springer_german_train_plain.tar.gz",
        "https://muchmore.dfki.de/pubs/springer_german_train_V4.2.tar.gz",
    ],
    "muchmore_bigbio_kb": [
        "https://muchmore.dfki.de/pubs/springer_english_train_V4.2.tar.gz",
        "https://muchmore.dfki.de/pubs/springer_german_train_V4.2.tar.gz",
    ],
    "muchmore_en_bigbio_kb": "https://muchmore.dfki.de/pubs/springer_english_train_V4.2.tar.gz",
    "muchmore_de_bigbio_kb": "https://muchmore.dfki.de/pubs/springer_german_train_V4.2.tar.gz",
    "plain": [
        "https://muchmore.dfki.de/pubs/springer_english_train_plain.tar.gz",
        "https://muchmore.dfki.de/pubs/springer_german_train_plain.tar.gz",
    ],
    "plain_en": "https://muchmore.dfki.de/pubs/springer_english_train_plain.tar.gz",
    "plain_de": "https://muchmore.dfki.de/pubs/springer_german_train_plain.tar.gz",
    "muchmore_bigbio_t2t": [
        "https://muchmore.dfki.de/pubs/springer_english_train_plain.tar.gz",
        "https://muchmore.dfki.de/pubs/springer_german_train_plain.tar.gz",
    ],
}

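# Keys of _URLs are config names: _split_generators looks up
# _URLs[self.config.name]. Note that "plain", "plain_en" and "plain_de"
# have URL entries (and feature branches below) but no BigBioConfig in
# BUILDER_CONFIGS.
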
# took version from annotated file names
_SOURCE_VERSION = "4.2.0"
_BIGBIO_VERSION = "1.0.0"
_SUPPORTED_TASKS = [
    Tasks.TRANSLATION,
    Tasks.NAMED_ENTITY_RECOGNITION,
    Tasks.NAMED_ENTITY_DISAMBIGUATION,
    Tasks.RELATION_EXTRACTION,
]

NATIVE_ENCODING = "ISO-8859-1"
FILE_NAME_PATTERN = r"^(.+?)\.(eng|ger)\.abstr(\.chunkmorph\.annotated\.xml)?$"
LANG_MAP = {"eng": "en", "ger": "de"}
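# FILE_NAME_PATTERN matches both member types, e.g. (file names taken from
# the module docstring) "Arthroskopie.00130003.eng.abstr" (plain text) and
# "Arthroskopie.00130237.eng.abstr.chunkmorph.annotated.xml" (annotations);
# group(1) is the shared abstract key and group(2) is the language tag.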


class MuchMoreDataset(datasets.GeneratorBasedBuilder):
    """MuchMore Springer Bilingual Corpus"""

    DEFAULT_CONFIG_NAME = "muchmore_source"
    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="muchmore_source",
            version=SOURCE_VERSION,
            description="MuchMore source schema",
            schema="source",
            subset_id="muchmore",
        ),
        BigBioConfig(
            name="muchmore_bigbio_kb",
            version=BIGBIO_VERSION,
            description="MuchMore simplified BigBio kb schema",
            schema="bigbio_kb",
            subset_id="muchmore",
        ),
        BigBioConfig(
            name="muchmore_en_bigbio_kb",
            version=BIGBIO_VERSION,
            description="MuchMore simplified BigBio kb schema (English subset)",
            schema="bigbio_kb",
            subset_id="muchmore_en",
        ),
        BigBioConfig(
            name="muchmore_de_bigbio_kb",
            version=BIGBIO_VERSION,
            description="MuchMore simplified BigBio kb schema (German subset)",
            schema="bigbio_kb",
            subset_id="muchmore_de",
        ),
        BigBioConfig(
            name="muchmore_bigbio_t2t",
            version=BIGBIO_VERSION,
            description="MuchMore simplified BigBio translation schema",
            schema="bigbio_t2t",
            subset_id="muchmore",
        ),
    ]

    # default config produces english annotations at the moment
    def _info(self):
        if self.config.schema == "source":
            features = Features(
                {
                    "sample_id": Value("string"),
                    "corresp": Value("string"),
                    "language": Value("string"),
                    "abstract": Value("string"),
                    "sentences": [
                        {
                            "id": Value("string"),
                            "corresp": Value("string"),
                            "umlsterms": [
                                {
                                    "id": Value("string"),
                                    "from": Value("string"),
                                    "to": Value("string"),
                                    "concepts": [
                                        {
                                            "id": Value("string"),
                                            "cui": Value("string"),
                                            "preferred": Value("string"),
                                            "tui": Value("string"),
                                            "mshs": [
                                                {
                                                    "code": Value("string"),
                                                }
                                            ],
                                        }
                                    ],
                                }
                            ],
                            "ewnterms": [
                                {
                                    "id": Value("string"),
                                    "to": Value("string"),
                                    "from": Value("string"),
                                    "senses": [
                                        {
                                            "offset": Value("string"),
                                        }
                                    ],
                                }
                            ],
                            "semrels": [
                                {
                                    "id": Value("string"),
                                    "term1": Value("string"),
                                    "term2": Value("string"),
                                    "reltype": Value("string"),
                                }
                            ],
                            "chunks": [
                                {
                                    "id": Value("string"),
                                    "to": Value("string"),
                                    "from": Value("string"),
                                    "type": Value("string"),
                                }
                            ],
                            "tokens": [
                                {
                                    "id": Value("string"),
                                    "pos": Value("string"),
                                    "lemma": Value("string"),
                                    "text": Value("string"),
                                }
                            ],
                        }
                    ],
                }
            )

        elif self.config.schema == "bigbio_kb":
            features = kb_features

        elif self.config.name in ("plain", "plain_en", "plain_de"):
            features = Features(
                {
                    "sample_id": Value("string"),
                    "sample_id_prefix": Value("string"),
                    "language": Value("string"),
                    "abstract": Value("string"),
                }
            )

        elif self.config.schema == "bigbio_t2t":
            features = text2text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        my_urls = _URLs[self.config.name]
        data_dirs = dl_manager.download(my_urls)
        # ensure that data_dirs is always a list of string paths
        if isinstance(data_dirs, str):
            data_dirs = [data_dirs]

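        # dl_manager.iter_archive yields (path-within-archive, file-object)
        # tuples for each member of a tarball without extracting it to disk;
        # chaining the per-archive iterators gives _generate_examples one
        # stream covering all downloaded archives.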
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "file_names_and_pointers": itertools.chain(
                        *[dl_manager.iter_archive(data_dir) for data_dir in data_dirs]
                    ),
                    "split": "train",
                },
            ),
        ]

+
@staticmethod
|
402 |
+
def _get_umlsterms_from_xsent(xsent: Element) -> List:
|
403 |
+
xumlsterms = xsent.find("./umlsterms")
|
404 |
+
|
405 |
+
umlsterms = []
|
406 |
+
for xumlsterm in xumlsterms.findall("./umlsterm"):
|
407 |
+
|
408 |
+
concepts = []
|
409 |
+
for xconcept in xumlsterm.findall("./concept"):
|
410 |
+
|
411 |
+
mshs = [
|
412 |
+
{"code": xmsh.get("code")} for xmsh in xconcept.findall("./msh")
|
413 |
+
]
|
414 |
+
|
415 |
+
concept = {
|
416 |
+
"id": xconcept.get("id"),
|
417 |
+
"cui": xconcept.get("cui"),
|
418 |
+
"preferred": xconcept.get("preferred"),
|
419 |
+
"tui": xconcept.get("tui"),
|
420 |
+
"mshs": mshs,
|
421 |
+
}
|
422 |
+
concepts.append(concept)
|
423 |
+
|
424 |
+
umlsterm = {
|
425 |
+
"id": xumlsterm.get("id"),
|
426 |
+
"from": xumlsterm.get("from"),
|
427 |
+
"to": xumlsterm.get("to"),
|
428 |
+
"concepts": concepts,
|
429 |
+
}
|
430 |
+
umlsterms.append(umlsterm)
|
431 |
+
|
432 |
+
return umlsterms
|
433 |
+
|
434 |
+
@staticmethod
|
435 |
+
def _get_ewnterms_from_xsent(xsent: Element) -> List:
|
436 |
+
xewnterms = xsent.find("./ewnterms")
|
437 |
+
|
438 |
+
ewnterms = []
|
439 |
+
for xewnterm in xewnterms.findall("./ewnterm"):
|
440 |
+
|
441 |
+
senses = [
|
442 |
+
{"offset": xsense.get("offset")}
|
443 |
+
for xsense in xewnterm.findall("./sense")
|
444 |
+
]
|
445 |
+
|
446 |
+
ewnterm = {
|
447 |
+
"id": xewnterm.get("id"),
|
448 |
+
"from": xewnterm.get("from"),
|
449 |
+
"to": xewnterm.get("to"),
|
450 |
+
"senses": senses,
|
451 |
+
}
|
452 |
+
ewnterms.append(ewnterm)
|
453 |
+
|
454 |
+
return ewnterms
|
455 |
+
|
    @staticmethod
    def _get_semrels_from_xsent(xsent: Element) -> List[Dict[str, str]]:
        xsemrels = xsent.find("./semrels")
        return [
            {
                "id": xsemrel.get("id"),
                "term1": xsemrel.get("term1"),
                "term2": xsemrel.get("term2"),
                "reltype": xsemrel.get("reltype"),
            }
            for xsemrel in xsemrels.findall("./semrel")
        ]

    @staticmethod
    def _get_chunks_from_xsent(xsent: Element) -> List[Dict[str, str]]:
        xchunks = xsent.find("./chunks")
        return [
            {
                "id": xchunk.get("id"),
                "to": xchunk.get("to"),
                "from": xchunk.get("from"),
                "type": xchunk.get("type"),
            }
            for xchunk in xchunks.findall("./chunk")
        ]

    @staticmethod
    def _get_tokens_from_xsent(xsent: Element) -> List[Dict[str, str]]:
        xtext = xsent.find("./text")
        return [
            {
                "id": xtoken.get("id"),
                "pos": xtoken.get("pos"),
                "lemma": xtoken.get("lemma"),
                "text": xtoken.text,
            }
            for xtoken in xtext.findall("./token")
        ]

    def _generate_original_examples(self, file_names_and_pointers):
        """Generate something close to the original dataset.

        This will yield one sample per abstract with the plaintext
        and the annotations combined into one object. If an abstract
        is available in both english and german, each language version
        will be a distinct example.
        """
        abstracts = {}
        samples = {}
        for file_name, fp in file_names_and_pointers:

            if file_name.endswith(".abstr"):
                sample_id = file_name
                abstracts[sample_id] = fp.read().decode(NATIVE_ENCODING)

            elif file_name.endswith(".abstr.chunkmorph.annotated.xml"):
                content_bytes = fp.read()
                content_str = content_bytes.decode(NATIVE_ENCODING)
                if content_str == "":
                    continue

                xroot = ET.fromstring(content_str)

                sentences = []
                for xsent in xroot.findall("./"):
                    sentence = {
                        "id": xsent.get("id"),
                        "corresp": xsent.get("corresp"),
                        "umlsterms": self._get_umlsterms_from_xsent(xsent),
                        "ewnterms": self._get_ewnterms_from_xsent(xsent),
                        "semrels": self._get_semrels_from_xsent(xsent),
                        "chunks": self._get_chunks_from_xsent(xsent),
                        "tokens": self._get_tokens_from_xsent(xsent),
                    }
                    sentences.append(sentence)

                sample_id = xroot.get("id")
                samples[sample_id] = {
                    "sample_id": sample_id,
                    "corresp": xroot.get("corresp"),
                    "language": xroot.get("lang"),
                    "sentences": sentences,
                }

        for _id, (sample_id, sample) in enumerate(samples.items()):
            sample["abstract"] = abstracts[sample_id]
            yield _id, sample

    def _generate_bigbio_kb_examples(self, file_names_and_pointers):
        """Generate big science biomedical kb examples."""

        def snippets_tokens_from_sents(sentences):
            snippets = []
            for sentence in sentences:
                snippet = [el["text"] for el in sentence["tokens"]]
                snippets.append(snippet)
            return snippets

        def sid_to_text_off(sid, snip_txts_lens):
            ii_sid = int(sid[1:])
            start = sum(snip_txts_lens[: ii_sid - 1]) + (ii_sid - 1)
            end = start + snip_txts_lens[ii_sid - 1]
            return start, end

        def sid_wid_to_text_off(sid, wid, snip_txts_lens, snip_toks_lens):
            s_start, s_end = sid_to_text_off(sid, snip_txts_lens)
            ii_sid = int(sid[1:])
            ii_wid = int(wid[1:])
            w_start = sum(snip_toks_lens[ii_sid - 1][: ii_wid - 1]) + (ii_wid - 1)
            start = s_start + w_start
            end = start + snip_toks_lens[ii_sid - 1][ii_wid - 1]
            return start, end
+
|
569 |
+
for _id, (file_name, fp) in enumerate(file_names_and_pointers):
|
570 |
+
|
571 |
+
content_bytes = fp.read()
|
572 |
+
content_str = content_bytes.decode(NATIVE_ENCODING)
|
573 |
+
if content_str == "":
|
574 |
+
continue
|
575 |
+
|
576 |
+
xroot = ET.fromstring(content_str)
|
577 |
+
|
578 |
+
sentences = []
|
579 |
+
for xsent in xroot.findall("./"):
|
580 |
+
sentence = {
|
581 |
+
"id": xsent.get("id"),
|
582 |
+
"corresp": xsent.get("corresp"),
|
583 |
+
"umlsterms": self._get_umlsterms_from_xsent(xsent),
|
584 |
+
"ewnterms": self._get_ewnterms_from_xsent(xsent),
|
585 |
+
"semrels": self._get_semrels_from_xsent(xsent),
|
586 |
+
"chunks": self._get_chunks_from_xsent(xsent),
|
587 |
+
"tokens": self._get_tokens_from_xsent(xsent),
|
588 |
+
}
|
589 |
+
sentences.append(sentence)
|
590 |
+
|
591 |
+
snip_toks = snippets_tokens_from_sents(sentences)
|
592 |
+
snip_txts = [" ".join(snip_tok) for snip_tok in snip_toks]
|
593 |
+
snip_txts_lens = [len(el) for el in snip_txts]
|
594 |
+
snip_toks_lens = [[len(tok) for tok in snip] for snip in snip_toks]
|
595 |
+
text = " ".join(snip_txts)
|
596 |
+
passages = [
|
597 |
+
{
|
598 |
+
"id": "{}-passage-0".format(xroot.get("id")),
|
599 |
+
"type": "abstract",
|
600 |
+
"text": [text],
|
601 |
+
"offsets": [(0, len(text))],
|
602 |
+
}
|
603 |
+
]
|
604 |
+
|
605 |
+
entities = []
|
606 |
+
rel_map = {}
|
607 |
+
for sentence in sentences:
|
608 |
+
sid = sentence["id"]
|
609 |
+
ii_sid = int(sid[1:])
|
610 |
+
|
611 |
+
for umlsterm in sentence["umlsterms"]:
|
612 |
+
umlsterm_id = umlsterm["id"]
|
613 |
+
entity_id = f"{sid}-{umlsterm_id}"
|
614 |
+
wid_from = umlsterm["from"]
|
615 |
+
wid_to = umlsterm["to"]
|
616 |
+
ii_wid_from = int(wid_from[1:])
|
617 |
+
ii_wid_to = int(wid_to[1:])
|
618 |
+
|
619 |
+
tok_text = " ".join(
|
620 |
+
snip_toks[ii_sid - 1][ii_wid_from - 1 : ii_wid_to]
|
621 |
+
)
|
622 |
+
w_from_start, w_from_end = sid_wid_to_text_off(
|
623 |
+
sid, wid_from, snip_txts_lens, snip_toks_lens
|
624 |
+
)
|
625 |
+
w_to_start, w_to_end = sid_wid_to_text_off(
|
626 |
+
sid, wid_to, snip_txts_lens, snip_toks_lens
|
627 |
+
)
|
628 |
+
|
629 |
+
offsets = [(w_from_start, w_to_end)]
|
630 |
+
main_text = text[w_from_start:w_to_end]
|
631 |
+
umls_cuis = [el["cui"] for el in umlsterm["concepts"]]
|
632 |
+
for concept in umlsterm["concepts"]:
|
633 |
+
rel_map[concept["id"]] = entity_id
|
634 |
+
|
635 |
+
entity = {
|
636 |
+
"id": "{}-{}".format(xroot.get("id"), entity_id),
|
637 |
+
"offsets": offsets,
|
638 |
+
"text": [tok_text],
|
639 |
+
"type": "umlsterm",
|
640 |
+
"normalized": [
|
641 |
+
{"db_name": "UMLS", "db_id": cui} for cui in umls_cuis
|
642 |
+
],
|
643 |
+
}
|
644 |
+
entities.append(entity)
|
645 |
+
|
646 |
+
relations = []
|
647 |
+
for sentence in sentences:
|
648 |
+
sid = sentence["id"]
|
649 |
+
for semrel in sentence["semrels"]:
|
650 |
+
semrel_id = semrel["id"]
|
651 |
+
rel_id = "{}-{}-{}-{}".format(
|
652 |
+
sid, semrel_id, semrel["term1"], semrel["term2"],
|
653 |
+
)
|
654 |
+
arg1_id = "{}-{}".format(xroot.get("id"), rel_map[semrel["term1"]])
|
655 |
+
arg2_id = "{}-{}".format(xroot.get("id"), rel_map[semrel["term2"]])
|
656 |
+
# some semrels are between multiple normalizations of
|
657 |
+
# a single entity. we skip these. see docstring at top
|
658 |
+
# of module for more complete description
|
659 |
+
if arg1_id == arg2_id:
|
660 |
+
continue
|
661 |
+
relation = {
|
662 |
+
"id": "{}-{}".format(xroot.get("id"), rel_id),
|
663 |
+
"type": semrel["reltype"],
|
664 |
+
"arg1_id": arg1_id,
|
665 |
+
"arg2_id": arg2_id,
|
666 |
+
"normalized": []
|
667 |
+
}
|
668 |
+
relations.append(relation)
|
669 |
+
|
670 |
+
yield _id, {
|
671 |
+
"id": xroot.get("id"),
|
672 |
+
"document_id": xroot.get("id"),
|
673 |
+
"passages": passages,
|
674 |
+
"entities": entities,
|
675 |
+
"coreferences": [],
|
676 |
+
"events": [],
|
677 |
+
"relations": relations,
|
678 |
+
}
|
679 |
+
|
    def _generate_plain_examples(self, file_names_and_pointers):
        """Generate plain text abstract examples."""
        for _id, (file_name, fp) in enumerate(file_names_and_pointers):
            match = re.match(FILE_NAME_PATTERN, file_name)
            yield _id, {
                "sample_id_prefix": match.group(1),
                "sample_id": file_name,
                "language": LANG_MAP[match.group(2)],
                "abstract": fp.read().decode(NATIVE_ENCODING),
            }

    def _generate_translation_examples(self, file_names_and_pointers):
        sample_map = defaultdict(list)
        for file_name, fp in file_names_and_pointers:
            if file_name.endswith("eng.abstr"):
                language = "en"
            elif file_name.endswith("ger.abstr"):
                language = "de"
            else:
                raise ValueError(f"unexpected file name: {file_name}")
            sample_id_prefix = re.sub(r"\.(eng|ger)\.abstr$", "", file_name)
            sample_id = file_name
            abstract = fp.read().decode(NATIVE_ENCODING)
            sample_map[sample_id_prefix].append(
                {"language": language, "sample_id": sample_id, "abstract": abstract}
            )

        _id = 0
        for sample_id_prefix, sample_pair in sample_map.items():
            # skip abstracts that do not exist in both languages
            if len(sample_pair) != 2:
                continue
            en_idx = 0 if sample_pair[0]["language"] == "en" else 1
            de_idx = 0 if en_idx == 1 else 1
            yield _id, {
                "id": sample_id_prefix,
                "document_id": sample_id_prefix,
                "text_1": sample_pair[en_idx]["abstract"],
                "text_2": sample_pair[de_idx]["abstract"],
                "text_1_name": "en",
                "text_2_name": "de",
            }
            _id += 1

    def _generate_examples(self, file_names_and_pointers, split):

        if self.config.schema == "source":
            genny = self._generate_original_examples(file_names_and_pointers)

        elif self.config.schema == "bigbio_kb":
            genny = self._generate_bigbio_kb_examples(file_names_and_pointers)

        elif self.config.name in ("plain", "plain_en", "plain_de"):
            genny = self._generate_plain_examples(file_names_and_pointers)

        elif self.config.schema == "bigbio_t2t":
            genny = self._generate_translation_examples(file_names_and_pointers)

        for _id, sample in genny:
            yield _id, sample
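

# A minimal usage sketch (assumes this script is hosted as "bigbio/muchmore"
# on the Hugging Face Hub; config names come from BUILDER_CONFIGS above):
#
#     from datasets import load_dataset
#     dsd = load_dataset("bigbio/muchmore", name="muchmore_bigbio_kb")
#     print(dsd["train"][0]["entities"][0])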