gabrielaltay committed
Commit 20bf954 · 1 Parent(s): 8bee9ef

upload hubscripts/cellfinder_hub.py to hub from bigbio repo

Files changed (1): cellfinder.py (+277, -0)

cellfinder.py ADDED
@@ -0,0 +1,277 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """
+ The CellFinder project aims to create a stem cell data repository by linking
+ information from existing public databases and by performing text mining on the
+ research literature. The first version of the corpus is composed of 10 full-text
+ documents containing more than 2,100 sentences, 65,000 tokens and 5,200
+ annotations for entities. The corpus has been annotated with six types of
+ entities (anatomical parts, cell components, cell lines, cell types,
+ genes/proteins and species) with an overall inter-annotator agreement of around 80%.
+
+ See: https://www.informatik.hu-berlin.de/de/forschung/gebiete/wbi/resources/cellfinder/
+ """
+ from pathlib import Path
+ from typing import Dict, Iterator, Tuple
+
+ import datasets
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+ from .bigbiohub import parse_brat_file
+ from .bigbiohub import brat_parse_to_bigbio_kb
+
+ _LANGUAGES = ['English']
+ _PUBMED = True
+ _LOCAL = False
+ _CITATION = """\
+ @inproceedings{neves2012annotating,
+     title = {Annotating and evaluating text for stem cell research},
+     author = {Neves, Mariana and Damaschun, Alexander and Kurtz, Andreas and Leser, Ulf},
+     year = 2012,
+     booktitle = {
+         Proceedings of the Third Workshop on Building and Evaluation Resources for
+         Biomedical Text Mining (BioTxtM 2012) at Language Resources and Evaluation
+         (LREC). Istanbul, Turkey
+     },
+     pages = {16--23},
+     organization = {Citeseer}
+ }
+ """
+
+ _DATASETNAME = "cellfinder"
+ _DISPLAYNAME = "CellFinder"
+
+ _DESCRIPTION = """\
+ The CellFinder project aims to create a stem cell data repository by linking \
+ information from existing public databases and by performing text mining on the \
+ research literature. The first version of the corpus is composed of 10 full-text \
+ documents containing more than 2,100 sentences, 65,000 tokens and 5,200 \
+ annotations for entities. The corpus has been annotated with six types of \
+ entities (anatomical parts, cell components, cell lines, cell types, \
+ genes/proteins and species) with an overall inter-annotator agreement of around 80%.
+
+ See: https://www.informatik.hu-berlin.de/de/forschung/gebiete/wbi/resources/cellfinder/
+ """
+
+ _HOMEPAGE = (
+     "https://www.informatik.hu-berlin.de/de/forschung/gebiete/wbi/resources/cellfinder/"
+ )
+ _LICENSE = 'Creative Commons Attribution Share Alike 3.0 Unported'
+
+ _SOURCE_URL = (
+     "https://www.informatik.hu-berlin.de/de/forschung/gebiete/wbi/resources/cellfinder/"
+ )
+ _URLS = {
+     _DATASETNAME: _SOURCE_URL + "cellfinder1_brat.tar.gz",
+     _DATASETNAME + "_splits": _SOURCE_URL + "cellfinder1_brat_sections.tar.gz",
+ }
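+ # Both archives hold the corpus in brat standoff format (paired .txt text and
+ # .ann annotation files); the "_splits" variant provides each document broken
+ # into its individual sections.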
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
+
+ _SOURCE_VERSION = "1.0.0"
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ class CellFinderDataset(datasets.GeneratorBasedBuilder):
+     """The CellFinder corpus."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="cellfinder_source",
+             version=SOURCE_VERSION,
+             description="CellFinder source schema",
+             schema="source",
+             subset_id="cellfinder",
+         ),
+         BigBioConfig(
+             name="cellfinder_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="CellFinder BigBio schema",
+             schema="bigbio_kb",
+             subset_id="cellfinder",
+         ),
+         BigBioConfig(
+             name="cellfinder_splits_source",
+             version=SOURCE_VERSION,
+             description="CellFinder source schema",
+             schema="source",
+             subset_id="cellfinder_splits",
+         ),
+         BigBioConfig(
+             name="cellfinder_splits_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="CellFinder BigBio schema",
+             schema="bigbio_kb",
+             subset_id="cellfinder_splits",
+         ),
+     ]
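+     # The four configs above cross {full documents, per-section "_splits"
+     # documents} with {original "source" schema, harmonized "bigbio_kb" schema}.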
+
+     DEFAULT_CONFIG_NAME = "cellfinder_source"
+     SPLIT_TO_IDS = {
+         "train": [16316465, 17381551, 17389645, 18162134, 18286199],
+         "test": [15971941, 16623949, 16672070, 17288595, 17967047],
+     }
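+     # These appear to be the PubMed IDs of the ten full-text articles in the
+     # corpus, split five/five between train and test; file stems in the
+     # downloaded archives begin with these IDs.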
+
+     def _info(self):
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "document_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "type": datasets.Value("string"),
+                     "entities": [
+                         {
+                             "entity_id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "offsets": datasets.Sequence([datasets.Value("int32")]),
+                             "text": datasets.Sequence(datasets.Value("string")),
+                         }
+                     ],
+                 }
+             )
+
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         urls = _URLS[_DATASETNAME]
+         if self.config.subset_id.endswith("_splits"):
+             urls = _URLS[_DATASETNAME + "_splits"]
+
+         data_dir = Path(dl_manager.download_and_extract(urls))
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"data_dir": data_dir, "split": "train"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"data_dir": data_dir, "split": "test"},
+             ),
+         ]
+
+     def _is_to_exclude(self, file: Path) -> bool:
+         """Checks whether a file should be skipped (hidden files, brat .ann files, license)."""
+         to_exclude = False
+
+         if (
+             file.name.startswith("._")
+             or file.name.endswith(".ann")
+             or file.name == "LICENSE"
+         ):
+             to_exclude = True
+
+         return to_exclude
+
+     def _not_in_split(self, file: Path, split: str) -> bool:
+         """Checks whether a file belongs to a different split than the requested one."""
+         to_exclude = False
+
+         # Skip files according to split
+         if self.config.subset_id.endswith("_splits"):
+             file_id = file.stem.split("_")[0]
+         else:
+             file_id = file.stem
+
+         if int(file_id) not in self.SPLIT_TO_IDS[split]:
+             to_exclude = True
+
+         return to_exclude
+
+     def _generate_examples(
+         self, data_dir: Path, split: str
+     ) -> Iterator[Tuple[str, Dict]]:
+         if self.config.schema == "source":
+             for file in data_dir.iterdir():
+
+                 # Ignore hidden files and annotation files - we only consider the brat text files
+                 if self._is_to_exclude(file=file):
+                     continue
+
+                 if self._not_in_split(file=file, split=split):
+                     continue
+
+                 # Read brat annotations for the given text file and convert the example to the source format
+                 brat_example = parse_brat_file(file)
+                 source_example = self._to_source_example(file, brat_example)
+
+                 yield source_example["document_id"], source_example
+
+         elif self.config.schema == "bigbio_kb":
+             for file in data_dir.iterdir():
+
+                 # Ignore hidden files and annotation files - we only consider the brat text files
+                 if self._is_to_exclude(file=file):
+                     continue
+
+                 if self._not_in_split(file=file, split=split):
+                     continue
+
+                 # Read brat annotations for the given text file and convert the example to the BigBio-KB format
+                 brat_example = parse_brat_file(file)
+                 kb_example = brat_parse_to_bigbio_kb(brat_example)
+                 kb_example["id"] = kb_example["document_id"]
+
+                 # Fix the text type annotation for the converted example
+                 kb_example["passages"][0]["type"] = self.get_text_type(file)
+
+                 yield kb_example["id"], kb_example
+
+     def _to_source_example(self, input_file: Path, brat_example: Dict) -> Dict:
+         """
+         Converts an example extracted using the default brat parsing logic to the source format
+         of the given corpus.
+         """
+         text_type = self.get_text_type(input_file)
+         source_example = {
+             "document_id": brat_example["document_id"],
+             "text": brat_example["text"],
+             "type": text_type,
+         }
+
+         id_prefix = brat_example["document_id"] + "_"
+
+         source_example["entities"] = []
+         for entity_annotation in brat_example["text_bound_annotations"]:
+             entity_ann = entity_annotation.copy()
+
+             # Make entity ids unique across documents by prefixing the document id
+             entity_ann["entity_id"] = id_prefix + entity_ann["id"]
+             entity_ann.pop("id")
+
+             source_example["entities"].append(entity_ann)
+
+         return source_example
+
+     def get_text_type(self, input_file: Path) -> str:
+         """
+         Extracts the section name from the filename; returns "full_text" if absent.
+         """
+         # Per-section files are assumed to be named like "<pubmed_id>_<index>_<section>";
+         # full documents carry no section suffix.
+         name_parts = str(input_file.stem).split("_")
+         if len(name_parts) == 3:
+             return name_parts[2]
+         return "full_text"
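
For reference, a minimal usage sketch. It assumes this script is used as a local datasets loading script with its bigbiohub.py helper saved alongside it (the hub repo id is not shown in this commit); the config names come from BUILDER_CONFIGS above, and newer datasets releases may additionally require trust_remote_code=True:

import datasets

# Load the harmonized BigBio-KB view of the corpus from the local script.
ds = datasets.load_dataset("cellfinder.py", name="cellfinder_bigbio_kb")

# Two splits, each built from five full-text documents.
print(ds)

# Inspect the first training document: passage type and a few entity annotations.
example = ds["train"][0]
print(example["passages"][0]["type"])
print(example["entities"][:3])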