Dr. Jorge Abreu Vicente committed on
Commit fba382b
1 Parent(s): 30c51fd

Create sd-nlp-non-tokenized.py

Files changed (1)
  1. sd-nlp-non-tokenized.py +229 -0
sd-nlp-non-tokenized.py ADDED
@@ -0,0 +1,229 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# template from: https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py

from __future__ import absolute_import, division, print_function

import json
import logging
import os

import datasets

logger = logging.getLogger(__name__)

_BASE_URL = "https://huggingface.co/datasets/EMBO/sd-nlp-non-tokenized/resolve/main/"

class SourceDataNLP(datasets.GeneratorBasedBuilder):
    """SourceDataNLP provides datasets to train NLP tasks in cell and molecular biology."""

    _NER_LABEL_NAMES = [
        "O",
        "I-SMALL_MOLECULE",
        "B-SMALL_MOLECULE",
        "I-GENEPROD",
        "B-GENEPROD",
        "I-SUBCELLULAR",
        "B-SUBCELLULAR",
        "I-CELL",
        "B-CELL",
        "I-TISSUE",
        "B-TISSUE",
        "I-ORGANISM",
        "B-ORGANISM",
        "I-EXP_ASSAY",
        "B-EXP_ASSAY",
    ]
    _SEMANTIC_GENEPROD_ROLES_LABEL_NAMES = ["O", "I-CONTROLLED_VAR", "B-CONTROLLED_VAR", "I-MEASURED_VAR", "B-MEASURED_VAR"]
    _SEMANTIC_SMALL_MOL_ROLES_LABEL_NAMES = ["O", "I-CONTROLLED_VAR", "B-CONTROLLED_VAR", "I-MEASURED_VAR", "B-MEASURED_VAR"]
    _BORING_LABEL_NAMES = ["O", "I-BORING", "B-BORING"]
    _PANEL_START_NAMES = ["O", "B-PANEL_START"]
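
    # The label lists above follow an IOB-style convention: "B-" marks the first word
    # of an annotated span, "I-" its continuation, and "O" words outside any span.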

    _CITATION = """\
        @Unpublished{
            huggingface: dataset,
            title = {SourceData NLP},
            authors={Thomas Lemberger, EMBO},
            year={2021}
        }
    """

    _DESCRIPTION = """\
        This dataset is based on the SourceData database and is intended to facilitate training of NLP tasks in the cell and molecular biology domain.
    """

    _HOMEPAGE = "https://huggingface.co/datasets/EMBO/sd-nlp-non-tokenized"

    _LICENSE = "CC-BY 4.0"

    VERSION = datasets.Version("0.0.1")

    _URLS = {
        "NER": f"{_BASE_URL}sd_panels_general_tokenization.zip",
        "GENEPROD_ROLES": f"{_BASE_URL}sd_panels_general_tokenization.zip",
        "SMALL_MOL_ROLES": f"{_BASE_URL}sd_panels_general_tokenization.zip",
        "BORING": f"{_BASE_URL}sd_panels_general_tokenization.zip",
        "PANELIZATION": f"{_BASE_URL}sd_fig_general_tokenization.zip",
    }

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="NER", version="0.0.1", description="Dataset for entity recognition"),
        datasets.BuilderConfig(name="GENEPROD_ROLES", version="0.0.1", description="Dataset for semantic roles of gene products."),
        datasets.BuilderConfig(name="SMALL_MOL_ROLES", version="0.0.1", description="Dataset for semantic roles of small molecules."),
        datasets.BuilderConfig(name="BORING", version="0.0.1", description="Dataset for tagging of 'boring' entities."),
        datasets.BuilderConfig(
            name="PANELIZATION",
            version="0.0.1",
            description="Dataset for figure legend segmentation into panel-specific legends.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "NER"

    def _info(self):
        # Feature names mirror the keys yielded by _generate_examples; "tag_mask" marks
        # the positions that are relevant for the task of the selected configuration.
        if self.config.name == "NER":
            features = datasets.Features(
                {
                    "input_ids": datasets.Sequence(feature=datasets.Value("string")),
                    "labels": datasets.Sequence(
                        feature=datasets.ClassLabel(num_classes=len(self._NER_LABEL_NAMES), names=self._NER_LABEL_NAMES)
                    ),
                    "tag_mask": datasets.Sequence(feature=datasets.Value("int8")),
                }
            )
        elif self.config.name == "GENEPROD_ROLES":
            features = datasets.Features(
                {
                    "input_ids": datasets.Sequence(feature=datasets.Value("string")),
                    "labels": datasets.Sequence(
                        feature=datasets.ClassLabel(
                            num_classes=len(self._SEMANTIC_GENEPROD_ROLES_LABEL_NAMES),
                            names=self._SEMANTIC_GENEPROD_ROLES_LABEL_NAMES,
                        )
                    ),
                    "tag_mask": datasets.Sequence(feature=datasets.Value("int8")),
                }
            )
        elif self.config.name == "SMALL_MOL_ROLES":
            features = datasets.Features(
                {
                    "input_ids": datasets.Sequence(feature=datasets.Value("string")),
                    "labels": datasets.Sequence(
                        feature=datasets.ClassLabel(
                            num_classes=len(self._SEMANTIC_SMALL_MOL_ROLES_LABEL_NAMES),
                            names=self._SEMANTIC_SMALL_MOL_ROLES_LABEL_NAMES,
                        )
                    ),
                    "tag_mask": datasets.Sequence(feature=datasets.Value("int8")),
                }
            )
        elif self.config.name == "BORING":
            features = datasets.Features(
                {
                    "input_ids": datasets.Sequence(feature=datasets.Value("string")),
                    "labels": datasets.Sequence(
                        feature=datasets.ClassLabel(num_classes=len(self._BORING_LABEL_NAMES), names=self._BORING_LABEL_NAMES)
                    ),
                }
            )
        elif self.config.name == "PANELIZATION":
            features = datasets.Features(
                {
                    "input_ids": datasets.Sequence(feature=datasets.Value("string")),
                    "labels": datasets.Sequence(
                        feature=datasets.ClassLabel(num_classes=len(self._PANEL_START_NAMES), names=self._PANEL_START_NAMES)
                    ),
                    "tag_mask": datasets.Sequence(feature=datasets.Value("int8")),
                }
            )

        return datasets.DatasetInfo(
            description=self._DESCRIPTION,
            features=features,
            supervised_keys=("input_ids", "labels"),
            homepage=self._HOMEPAGE,
            license=self._LICENSE,
            citation=self._CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators.
        Downloads and extracts the archive for the selected configuration and points each split to its JSONL file."""
        url = self._URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(url)
        if self.config.name in ["NER", "GENEPROD_ROLES", "SMALL_MOL_ROLES", "BORING"]:
            data_dir += "/sd_panels_general_tokenization"
        elif self.config.name == "PANELIZATION":
            data_dir += "/sd_fig_general_tokenization"
        else:
            raise ValueError(f"unknown config name: {self.config.name}")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={"filepath": data_dir + "/train.jsonl"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_dir + "/test.jsonl"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": data_dir + "/eval.jsonl"},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples. This method receives as arguments the `gen_kwargs` defined in `_split_generators`.
        It is in charge of opening the given file and yielding (key, example) tuples from the dataset.
        The key is not important; it is there mostly for legacy reasons (inherited from tfds)."""

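        # Sketch of the expected structure of one JSONL record, inferred from the
        # fields accessed below (the authoritative schema is whatever the archives
        # referenced in _URLS actually contain):
        #
        #   {
        #       "input_ids": ["Figure", "1", "shows", ...],
        #       "label_ids": {
        #           "entity_types": ["O", "B-GENEPROD", "I-GENEPROD", ...],
        #           "geneprod_roles": ["O", "B-CONTROLLED_VAR", ...],
        #           "small_mol_roles": ["O", ...],
        #           "boring": ["O", ...],
        #           "panel_start": ["B-PANEL_START", "O", ...]   # PANELIZATION archive
        #       }
        #   }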
        with open(filepath, encoding="utf-8") as f:
            logger.info("⏳ Generating examples from = %s", filepath)
            for id_, row in enumerate(f):
                data = json.loads(row)
                if self.config.name == "NER":
                    labels = data["label_ids"]["entity_types"]
                    # mask is 1 at every position carrying an entity label
                    tag_mask = [0 if tag == "O" else 1 for tag in labels]
                    yield id_, {
                        "input_ids": data["input_ids"],
                        "labels": labels,
                        "tag_mask": tag_mask,
                    }
                elif self.config.name == "GENEPROD_ROLES":
                    labels = data["label_ids"]["entity_types"]
                    geneprod = ["B-GENEPROD", "I-GENEPROD", "B-PROTEIN", "I-PROTEIN", "B-GENE", "I-GENE"]
                    # mask is 1 only at gene product positions
                    tag_mask = [1 if t in geneprod else 0 for t in labels]
                    yield id_, {
                        "input_ids": data["input_ids"],
                        "labels": data["label_ids"]["geneprod_roles"],
                        "tag_mask": tag_mask,
                    }
                elif self.config.name == "SMALL_MOL_ROLES":
                    labels = data["label_ids"]["entity_types"]
                    small_mol = ["B-SMALL_MOLECULE", "I-SMALL_MOLECULE"]
                    # mask is 1 only at small molecule positions
                    tag_mask = [1 if t in small_mol else 0 for t in labels]
                    yield id_, {
                        "input_ids": data["input_ids"],
                        "labels": data["label_ids"]["small_mol_roles"],
                        "tag_mask": tag_mask,
                    }
                elif self.config.name == "BORING":
                    yield id_, {"input_ids": data["input_ids"], "labels": data["label_ids"]["boring"]}
                elif self.config.name == "PANELIZATION":
                    labels = data["label_ids"]["panel_start"]
                    # mask is 1 at the first word of each panel legend
                    tag_mask = [1 if t == "B-PANEL_START" else 0 for t in labels]
                    yield id_, {
                        "input_ids": data["input_ids"],
                        "labels": data["label_ids"]["panel_start"],
                        "tag_mask": tag_mask,
                    }
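

if __name__ == "__main__":
    # Minimal usage sketch, assuming the loading script is published on the Hub at
    # "EMBO/sd-nlp-non-tokenized" (see _HOMEPAGE); any name from BUILDER_CONFIGS
    # ("NER", "GENEPROD_ROLES", "SMALL_MOL_ROLES", "BORING", "PANELIZATION") can be
    # passed as the configuration name.
    ds = datasets.load_dataset("EMBO/sd-nlp-non-tokenized", "NER")
    print(ds)                    # DatasetDict with train / test / validation splits
    print(ds["train"].features)  # input_ids, labels (ClassLabel), tag_mask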