Languages: English
Multilinguality: monolingual
License: Creative Commons Attribution 3.0 Unported
gabrielaltay committed
Commit 503a2ae
1 Parent(s): 97f60de

upload hubscripts/jnlpba_hub.py to hub from bigbio repo

Files changed (1)
  1. jnlpba.py +181 -0
jnlpba.py ADDED
@@ -0,0 +1,181 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ The data came from the GENIA version 3.02 corpus (Kim et al., 2003).
+ This was formed from a controlled search on MEDLINE using the MeSH terms human, blood cells and transcription factors.
+ From this search 2,000 abstracts were selected and hand annotated according to a small taxonomy of 48 classes based on
+ a chemical classification. Among the classes, 36 terminal classes were used to annotate the GENIA corpus.
+ """
+
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+
+ _LANGUAGES = ['English']
+ _PUBMED = True
+ _LOCAL = False
+
+ _CITATION = """\
+ @inproceedings{collier-kim-2004-introduction,
+     title = "Introduction to the Bio-entity Recognition Task at {JNLPBA}",
+     author = "Collier, Nigel and Kim, Jin-Dong",
+     booktitle = "Proceedings of the International Joint Workshop
+                  on Natural Language Processing in Biomedicine and its Applications
+                  ({NLPBA}/{B}io{NLP})",
+     month = aug # " 28th and 29th",
+     year = "2004",
+     address = "Geneva, Switzerland",
+     publisher = "COLING",
+     url = "https://aclanthology.org/W04-1213",
+     pages = "73--78",
+ }
+ """
+
+ _DATASETNAME = "jnlpba"
+ _DISPLAYNAME = "JNLPBA"
+
+ _DESCRIPTION = """\
+ NER For Bio-Entities
+ """
+
+ _HOMEPAGE = "http://www.geniaproject.org/shared-tasks/bionlp-jnlpba-shared-task-2004"
+
+ _LICENSE = 'Creative Commons Attribution 3.0 Unported'
+
+ _URLS = {
+     _DATASETNAME: "http://www.nactem.ac.uk/GENIA/current/Shared-tasks/JNLPBA/Train/Genia4ERtraining.tar.gz",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
+
+ # Version provided by the original dataset (GENIA corpus 3.02).
+ _SOURCE_VERSION = "3.2.0"
+
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ class JNLPBADataset(datasets.GeneratorBasedBuilder):
+     """
+     The data came from the GENIA version 3.02 corpus
+     (Kim et al., 2003).
+     This was formed from a controlled search on MEDLINE
+     using the MeSH terms human, blood cells and transcription factors.
+     From this search 2,000 abstracts were selected and hand annotated
+     according to a small taxonomy of 48 classes based on
+     a chemical classification.
+     Among the classes, 36 terminal classes were used to annotate the GENIA corpus.
+     """
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="jnlpba_source",
+             version=SOURCE_VERSION,
+             description="jnlpba source schema",
+             schema="source",
+             subset_id="jnlpba",
+         ),
+         BigBioConfig(
+             name="jnlpba_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="jnlpba BigBio schema",
+             schema="bigbio_kb",
+             subset_id="jnlpba",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "jnlpba_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             # Reuse the feature schema of the upstream `jnlpba` dataset on the Hub.
+             features = datasets.load_dataset("jnlpba", split="train").features
+
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         # The splits are read from the upstream `jnlpba` Hub dataset rather
+         # than downloaded from _URLS via dl_manager.
+         data = datasets.load_dataset("jnlpba")
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # Whatever you put in gen_kwargs will be passed to _generate_examples
+                 gen_kwargs={"data": data["train"]},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"data": data["validation"]},
+             ),
+         ]
+
+     def _generate_examples(self, data: datasets.Dataset) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         uid = 0
+
+         if self.config.schema == "source":
+             for key, sample in enumerate(data):
+                 yield key, sample
+
+         elif self.config.schema == "bigbio_kb":
+             for i, sample in enumerate(data):
+                 feature_dict = {
+                     "id": uid,
+                     "document_id": "NULL",
+                     "passages": [],
+                     "entities": [],
+                     "relations": [],
+                     "events": [],
+                     "coreferences": [],
+                 }
+                 uid += 1
+
+                 # Offsets are computed as if the tokens were joined by single
+                 # spaces: record each entity at the current offset, then
+                 # advance past the token and its trailing space.
+                 offset_start = 0
+                 for token, tag in zip(sample["tokens"], sample["ner_tags"]):
+                     feature_dict["entities"].append(
+                         {
+                             "id": uid,
+                             "offsets": [[offset_start, offset_start + len(token)]],
+                             "text": [token],
+                             "type": tag,
+                             "normalized": [],
+                         }
+                     )
+                     uid += 1
+                     offset_start += len(token) + 1
+
+                 yield i, feature_dict
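
A minimal usage sketch, not part of the commit: assuming the script is hosted on the Hub under `bigbio/jnlpba` (the repo path is an assumption, not something this commit specifies), the two configs defined in BUILDER_CONFIGS can be loaded as shown below. Newer `datasets` releases may additionally require `trust_remote_code=True` for script-based datasets.

    from datasets import load_dataset

    # Source schema: token/tag columns as provided by the upstream `jnlpba` dataset.
    source = load_dataset("bigbio/jnlpba", name="jnlpba_source", split="train")

    # BigBio KB schema: one record per example, with `entities` carrying
    # character offsets, surface text, and the NER tag as `type`.
    kb = load_dataset("bigbio/jnlpba", name="jnlpba_bigbio_kb", split="train")

    sample = kb[0]
    for entity in sample["entities"][:5]:
        print(entity["offsets"], entity["text"], entity["type"])

Note that the offsets index into `" ".join(tokens)` for each example, not into the original abstract text, since `_generate_examples` treats tokens as space-separated.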