Modalities: Text
Languages: English
Libraries: Datasets
License: ISC
gabrielaltay committed
Commit 970237b
1 Parent(s): bcb9c1b

upload hub_repos/cpi/cpi.py to hub from bigbio repo

Files changed (1)
  1. cpi.py +295 -0
cpi.py ADDED
@@ -0,0 +1,295 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The compound-protein relationship (CPI) dataset consists of 2,613 sentences from abstracts containing
annotations of proteins, small molecules, and their relationships. For further information see:
https://pubmed.ncbi.nlm.nih.gov/32126064/ and https://github.com/KerstenDoering/CPI-Pipeline
"""
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import Dict, Iterator, Tuple

import datasets

from .bigbiohub import kb_features
from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks

_LANGUAGES = ['English']
_PUBMED = True
_LOCAL = False
_CITATION = """\
@article{doring2020automated,
  title={Automated recognition of functional compound-protein relationships in literature},
  author={D{\"o}ring, Kersten and Qaseem, Ammar and Becer, Michael and Li, Jianyu and Mishra, Pankaj and Gao, Mingjie and Kirchner, Pascal and Sauter, Florian and Telukunta, Kiran K and Moumbock, Aur{\'e}lien FA and others},
  journal={Plos one},
  volume={15},
  number={3},
  pages={e0220925},
  year={2020},
  publisher={Public Library of Science San Francisco, CA USA}
}
"""

_DATASETNAME = "cpi"
_DISPLAYNAME = "CPI"

_DESCRIPTION = """\
The compound-protein relationship (CPI) dataset consists of 2,613 sentences from abstracts containing \
annotations of proteins, small molecules, and their relationships
"""

_HOMEPAGE = "https://github.com/KerstenDoering/CPI-Pipeline"

_LICENSE = 'ISC License'

_URLS = {
    "CPI": "https://github.com/KerstenDoering/CPI-Pipeline/raw/master/data_sets/xml/CPI-DS.xml",
    "CPI_IV": "https://github.com/KerstenDoering/CPI-Pipeline/raw/master/data_sets/xml/CPI-DS_IV.xml",
    "CPI_NIV": "https://github.com/KerstenDoering/CPI-Pipeline/raw/master/data_sets/xml/CPI-DS_NIV.xml",
}

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.NAMED_ENTITY_DISAMBIGUATION, Tasks.RELATION_EXTRACTION]

_SOURCE_VERSION = "1.0.2"
_BIGBIO_VERSION = "1.0.0"


class CpiDataset(datasets.GeneratorBasedBuilder):
    """The compound-protein relationship (CPI) dataset"""

    ENTITY_TYPE_TO_DB_NAME = {"compound": "PubChem", "protein": "UniProt"}

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="cpi_source",
            version=SOURCE_VERSION,
            description="CPI source schema",
            schema="source",
            subset_id="cpi",
        ),
        BigBioConfig(
            name="cpi_iv_source",
            version=SOURCE_VERSION,
            description="CPI source schema - subset with interaction verbs",
            schema="source",
            subset_id="cpi_iv",
        ),
        BigBioConfig(
            name="cpi_niv_source",
            version=SOURCE_VERSION,
            description="CPI source schema - subset without interaction verbs",
            schema="source",
            subset_id="cpi_niv",
        ),
        BigBioConfig(
            name="cpi_bigbio_kb",
            version=BIGBIO_VERSION,
            description="CPI BigBio schema",
            schema="bigbio_kb",
            subset_id="cpi",
        ),
    ]

    DEFAULT_CONFIG_NAME = "cpi_source"

    def _info(self):
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "document_id": datasets.Value("string"),
                    "document_orig_id": datasets.Value("string"),
                    "sentences": [
                        {
                            "sentence_id": datasets.Value("string"),
                            "sentence_orig_id": datasets.Value("string"),
                            "text": datasets.Value("string"),
                            "entities": [
                                {
                                    "entity_id": datasets.Value("string"),
                                    "entity_orig_id": datasets.Sequence(datasets.Value("string")),
                                    "type": datasets.Value("string"),
                                    "offset": datasets.Sequence(datasets.Value("int32")),
                                    "text": datasets.Value("string"),
                                }
                            ],
                            "pairs": [
                                {
                                    "pair_id": datasets.Value("string"),
                                    "e1": datasets.Value("string"),
                                    "e2": datasets.Value("string"),
                                    "interaction": datasets.Value("bool"),
                                }
                            ],
                        }
                    ],
                }
            )

        elif self.config.schema == "bigbio_kb":
            features = kb_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Determine which file to load based on the subset id (cpi, cpi_iv, cpi_niv)
        subset_url = _URLS[self.config.subset_id.upper()]
        subset_file = dl_manager.download_and_extract(subset_url)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"subset_file": subset_file},
            )
        ]

    def _generate_examples(self, subset_file: Path) -> Iterator[Tuple[str, Dict]]:
        if self.config.schema == "source":
            for doc_id, document in self._read_source_examples(subset_file):
                yield doc_id, document

        elif self.config.name == "cpi_bigbio_kb":
            # Note: The sentences in a CPI document do not necessarily occur consecutively in
            # the original publication. Nevertheless, this implementation captures all sentences
            # of a document in one kb-schema document to explicitly model documents.

            # Transform each source-schema document into a kb-schema document
            for doc_id, source_document in self._read_source_examples(subset_file):
                sentence_offset = 0
                passages = []
                entities = []
                relations = []

                # Transform all sentences to kb-schema sentences
                for source_sentence in source_document["sentences"]:
                    text = source_sentence["text"]
                    passages.append(
                        {
                            "id": source_sentence["sentence_id"],
                            "text": [text],
                            "offsets": [[sentence_offset, sentence_offset + len(text)]],
                            "type": "",
                        }
                    )

                    # Transform source-schema entities to kb-schema entities
                    for source_entity in source_sentence["entities"]:
                        db_name = self.ENTITY_TYPE_TO_DB_NAME[source_entity["type"]]

                        entity_offset = source_entity["offset"]
                        entity_offset = [sentence_offset + entity_offset[0], sentence_offset + entity_offset[1]]

                        entities.append(
                            {
                                "id": source_entity["entity_id"],
                                "type": source_entity["type"],
                                "text": [source_entity["text"]],
                                "offsets": [entity_offset],
                                "normalized": [
                                    {"db_name": db_name, "db_id": db_id} for db_id in source_entity["entity_orig_id"]
                                ],
                            }
                        )

                    # Transform source-schema pairs to kb-schema relations
                    for source_pair in source_sentence["pairs"]:
                        # Ignore pairs that are annotated as not being in a relationship!
                        if not source_pair["interaction"]:
                            continue

                        relations.append(
                            {
                                "id": source_pair["pair_id"],
                                "type": "compound-protein-interaction",
                                "arg1_id": source_pair["e1"],
                                "arg2_id": source_pair["e2"],
                                "normalized": [],
                            }
                        )

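                    # Document-level character offsets assume one separator character between
                    # consecutive sentences, hence the "+ 1" when advancing the running offset below.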
                    sentence_offset += len(text) + 1

                kb_document = {
                    "id": source_document["document_id"],
                    "document_id": source_document["document_orig_id"],
                    "passages": passages,
                    "entities": entities,
                    "relations": relations,
                    "events": [],
                    "coreferences": [],
                }

                yield source_document["document_id"], kb_document

    def _read_source_examples(self, input_file: Path) -> Iterator[Tuple[str, Dict]]:
        """
        Reads all instances from the given input file and parses them into the source format.
        """
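        # Illustrative sketch (not verbatim data) of the XML layout this parser expects,
        # inferred from the attributes accessed below:
        #   <document id="..." origId="...">
        #     <sentence id="..." origId="..." text="...">
        #       <entity id="..." origId="..." charOffset="start-end" type="compound|protein" text="..." />
        #       <pair id="..." e1="..." e2="..." interaction="True|False" />
        #     </sentence>
        #   </document>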
        root = ET.parse(input_file)
        for document in root.iter("document"):
            sentences = []
            for sentence in document.iter("sentence"):
                entities = []
                for entity in sentence.iter("entity"):
                    char_offsets = entity.attrib["charOffset"].split("-")
                    start, end = int(char_offsets[0]), int(char_offsets[1])

                    entities.append(
                        {
                            "entity_id": entity.attrib["id"],
                            "entity_orig_id": entity.attrib["origId"].split(","),
                            "type": entity.attrib["type"],
                            "text": entity.attrib["text"],
                            "offset": [start, end],
                        }
                    )

                pairs = []
                for pair in sentence.iter("pair"):
                    pairs.append(
                        {
                            "pair_id": pair.attrib["id"],
                            "e1": pair.attrib["e1"],
                            "e2": pair.attrib["e2"],
                            "interaction": pair.attrib["interaction"].lower() == "true",
                        }
                    )

                sentences.append(
                    {
                        "sentence_id": sentence.attrib["id"],
                        "sentence_orig_id": sentence.attrib["origId"],
                        "text": sentence.attrib["text"],
                        "entities": entities,
                        "pairs": pairs,
                    }
                )

            document_dict = {
                "document_id": document.attrib["id"],
                "document_orig_id": document.attrib["origId"],
                "sentences": sentences,
            }

            yield document.attrib["id"], document_dict
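
A minimal usage sketch for this loader. Assumptions: the script is hosted in a Hub dataset repo such as bigbio/cpi (the repo id is not stated in this commit), and recent versions of the datasets library may additionally require trust_remote_code=True when loading script-based datasets.

from datasets import load_dataset

# Source schema for the full CPI corpus; the other configs defined above are
# cpi_iv_source, cpi_niv_source, and cpi_bigbio_kb.
cpi_source = load_dataset("bigbio/cpi", name="cpi_source")

# BigBio kb schema with passages, entities, and compound-protein-interaction relations.
cpi_kb = load_dataset("bigbio/cpi", name="cpi_bigbio_kb")

print(cpi_source["train"][0]["document_id"])
print(cpi_kb["train"][0]["relations"])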