Datasets:

Modalities:
Text
Languages:
English
Size:
< 1K
Libraries:
Datasets
License:
gabrielaltay committed on
Commit
a7381ad
1 Parent(s): 430fdaf

upload hubscripts/linnaeus_hub.py to hub from bigbio repo

Browse files
Files changed (1) hide show
  1. linnaeus.py +271 -0
linnaeus.py ADDED
@@ -0,0 +1,271 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ LINNAEUS provides a novel corpus of full-text documents manually annotated for species mentions.
18
+
19
+ To understand the true performance of the LINNAEUS system, we generated a gold standard dataset specifically
20
+ annotated to evaluate species name identification software. The reliability of this gold standard is high,
21
+ however some species names are likely to be omitted from this evaluation set, as shown by IAA analysis.
22
+ Performance of species tagging by LINNAEUS on full-text articles is very good, with 94.3% recall and
23
+ 97.1% precision on mention level, and 98.1% recall and 90.4% precision on document level.
24
+ """
25
+
26
+ import csv
27
+ import os
28
+ from pathlib import Path
29
+ from typing import Dict, List, Tuple
30
+
31
+ import datasets
32
+
33
+ from .bigbiohub import kb_features
34
+ from .bigbiohub import BigBioConfig
35
+ from .bigbiohub import Tasks
36
+
37
# Metadata flags consumed by the BigBio dataset-loading infrastructure.
_LANGUAGES = ['English']
# NOTE(review): presumably marks that the corpus documents originate from
# PubMed / PubMed Central — confirm against bigbiohub conventions.
_PUBMED = True
# Corpus is downloaded from a public URL rather than supplied locally.
_LOCAL = False

# BibTeX entry for the LINNAEUS paper (Gerner, Nenadic & Bergman, 2010).
_CITATION = """\
@Article{gerner2010linnaeus,
title={LINNAEUS: a species name identification system for biomedical literature},
author={Gerner, Martin and Nenadic, Goran and Bergman, Casey M},
journal={BMC bioinformatics},
volume={11},
number={1},
pages={1--17},
year={2010},
publisher={BioMed Central}
}
"""

# Canonical dataset identifier and human-readable display name.
_DATASETNAME = "linnaeus"
_DISPLAYNAME = "LINNAEUS"

# Short description shown on the dataset card.
_DESCRIPTION = """\
Linnaeus is a novel corpus of full-text documents manually annotated for species mentions.
"""

_HOMEPAGE = "http://linnaeus.sourceforge.net/"

_LICENSE = 'Creative Commons Attribution 4.0 International'

# Download URL for the manually annotated corpus archive (tar.gz).
_URLS = {
    _DATASETNAME: "https://sourceforge.net/projects/linnaeus/files/Corpora/manual-corpus-species-1.0.tar.gz/download",
}

# Tasks this corpus supports in the BigBio task taxonomy.
_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.NAMED_ENTITY_DISAMBIGUATION]

# Version of the upstream corpus and of the BigBio schema mapping.
_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"
72
+
73
+
74
class LinnaeusDataset(datasets.GeneratorBasedBuilder):
    """Linnaeus provides a gold-standard corpus of full-text articles
    with manually annotated mentions of species names."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="linnaeus_source",
            version=SOURCE_VERSION,
            description="Linnaeus source schema",
            schema="source",
            subset_id="linnaeus",
        ),
        BigBioConfig(
            name="linnaeus_filtered_source",
            version=SOURCE_VERSION,
            description="Linnaeus source schema (filtered tags)",
            schema="source",
            subset_id="linnaeus_filtered",
        ),
        BigBioConfig(
            name="linnaeus_bigbio_kb",
            version=BIGBIO_VERSION,
            description="Linnaeus BigBio schema",
            schema="bigbio_kb",
            subset_id="linnaeus",
        ),
        BigBioConfig(
            name="linnaeus_filtered_bigbio_kb",
            version=BIGBIO_VERSION,
            description="Linnaeus BigBio schema (filtered tags)",
            schema="bigbio_kb",
            subset_id="linnaeus_filtered",
        ),
    ]

    # FIX: was "linneaus_source" (typo), which matches no name in
    # BUILDER_CONFIGS, so loading the dataset without an explicit
    # config name could not resolve a default config.
    DEFAULT_CONFIG_NAME = "linnaeus_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata with the feature spec of the selected schema."""
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "document_id": datasets.Value("string"),
                    "document_type": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "entities": [
                        {
                            "id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "text": datasets.Sequence(datasets.Value("string")),
                            "offsets": datasets.Sequence([datasets.Value("int32")]),
                            "normalized": [
                                {
                                    "db_name": datasets.Value("string"),
                                    "db_id": datasets.Value("string"),
                                }
                            ],
                        }
                    ],
                }
            )
        elif self.config.schema == "bigbio_kb":
            # Shared KB feature spec provided by bigbiohub.
            features = kb_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        """Download and extract the corpus; the corpus ships a single split."""
        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_files": os.path.join(data_dir, "manual-corpus-species-1.0")
                },
            ),
        ]

    def _generate_examples(self, data_files: Path) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples.

        Reads one ``.txt`` file per document from the ``txt`` subdirectory and
        joins it with its annotations from ``tags.tsv`` (or
        ``filtered_tags.tsv`` for the ``*_filtered`` subsets).
        """
        corpus_dir = Path(data_files)
        tags_filename = (
            "filtered_tags.tsv"
            if self.config.subset_id.endswith("filtered")
            else "tags.tsv"
        )
        tags = self._load_tags(corpus_dir / tags_filename)
        # FIX: sort the glob result — glob order is filesystem-dependent,
        # which made guid assignment nondeterministic across machines.
        txt_files = sorted(corpus_dir.joinpath("txt").glob("*txt"))

        for guid, txt_file in enumerate(txt_files):
            document_key = txt_file.stem
            # Skip documents that carry no annotations in the tag file.
            if document_key not in tags:
                continue
            if self.config.schema == "source":
                example = self._create_source_example(txt_file, tags[document_key])
            elif self.config.schema == "bigbio_kb":
                example = self._create_kb_example(txt_file, tags[document_key])
                # FIX: id is a string feature in the KB schema (entity and
                # passage ids here are strings); was assigned the raw int.
                example["id"] = str(guid)
            example["document_id"] = str(document_key)
            yield guid, example

    @staticmethod
    def _load_tags(path: Path) -> Dict:
        """Loads all tags into a dictionary with document ID as keys and all annotations to that file as values.

        Each remaining row (header skipped) keeps its columns in order after
        the document-id column (index 1) is removed.
        """
        tags: Dict[str, List[List[str]]] = {}
        document_id_col = 1

        with open(path, encoding="utf-8") as csv_file:
            reader = csv.reader(csv_file, delimiter="\t")
            next(reader)  # skip the header row
            for line in reader:
                document_id = line.pop(document_id_col)
                tags.setdefault(document_id, []).append(line)
        return tags

    @staticmethod
    def _build_entity(entity_id: str, tag: List[str]) -> Dict:
        """Convert one tag row into the entity record shared by both schemas.

        A tag row is (species_id, start, end, mention_text, code); the
        species_id is "type:db_name:db_id", e.g. "species:ncbi:9606".
        """
        species_id, start, end, entity_text, _ = tag
        entity_type, db_name, db_id = species_id.split(":")
        return {
            "id": entity_id,
            "type": entity_type,
            "text": [entity_text],
            "offsets": [(int(start), int(end))],
            "normalized": [
                {
                    "db_name": db_name,
                    "db_id": db_id,
                }
            ],
        }

    def _create_source_example(self, txt_file, tags) -> Dict:
        """Creates example in source schema."""
        # FIX: read as UTF-8 explicitly — the default encoding is
        # platform-dependent; _load_tags already forces UTF-8.
        with open(txt_file, "r", encoding="utf-8") as file:
            text = file.read()
        return {
            "text": text,
            "document_type": "Article",
            "entities": [
                self._build_entity(str(tag_id), tag)
                for tag_id, tag in enumerate(tags)
            ],
        }

    def _create_kb_example(self, txt_file, tags) -> Dict:
        """Creates example in BigBio KB schema."""
        # FIX: read as UTF-8 explicitly (see _create_source_example).
        with open(txt_file, "r", encoding="utf-8") as file:
            text = file.read()
        return {
            # Single passage covering the whole article text.
            "passages": [
                {
                    "id": f"{txt_file.stem}__text",
                    "text": [text],
                    "type": "Article",
                    "offsets": [(0, len(text))],
                }
            ],
            "entities": [
                self._build_entity(f"{txt_file.stem}__T{tag_id}", tag)
                for tag_id, tag in enumerate(tags)
            ],
            # The corpus annotates entities only; remaining KB fields stay empty.
            "events": [],
            "relations": [],
            "coreferences": [],
        }