Dr. Jorge Abreu Vicente committed on
Commit 62692d2
1 Parent(s): 49d2e69

Create BLURB.py


Add BC5CDR-chem-IOB dataset

Files changed (1)
  1. BLURB.py +189 -0
BLURB.py ADDED
@@ -0,0 +1,189 @@
"""Loading script for BLURB (Biomedical Language Understanding and Reasoning Benchmark),
a benchmark for biomedical NLP."""

import datasets


_CITATION = """\
@article{2022,
    title={Domain-Specific Language Model Pretraining for Biomedical Natural Language Processing},
    volume={3},
    ISSN={2637-8051},
    url={http://dx.doi.org/10.1145/3458754},
    DOI={10.1145/3458754},
    number={1},
    journal={ACM Transactions on Computing for Healthcare},
    publisher={Association for Computing Machinery (ACM)},
    author={Gu, Yu and Tinn, Robert and Cheng, Hao and Lucas, Michael and Usuyama, Naoto and Liu, Xiaodong and Naumann, Tristan and Gao, Jianfeng and Poon, Hoifung},
    year={2022},
    month={Jan},
    pages={1--23}
}
"""

_DESCRIPTION = """BLURB (Biomedical Language Understanding and Reasoning Benchmark)
is a comprehensive benchmark for biomedical NLP, comprising 13 biomedical NLP datasets across 6
task types (NER, PICO, relation extraction, sentence similarity, document classification, and
question answering). Our aim is to facilitate investigations of biomedical natural language
processing, with a specific focus on language model pretraining, and to help accelerate progress
towards universal biomedical NLP applications. The BLURB homepage compares the datasets comprising
BLURB with those used in previous biomedical and clinical BERT language models."""

_HOMEPAGE = "https://microsoft.github.io/BLURB/index.html"

_LICENSE = "TBD"

_VERSION = "1.0.0"

DATA_DIR = "blurb/"

# The GitHub "tree" URL serves HTML pages, so the raw-content host is used
# to download the TSV files directly.
BASE_URL_NER = "https://raw.githubusercontent.com/cambridgeltl/MTL-Bioinformatics-2016/master/data/"

logger = datasets.logging.get_logger(__name__)

CITATION_BC5_CHEM = """@article{article,
    author = {Li, Jiao and Sun, Yueping and Johnson, Robin and Sciaky, Daniela and Wei, Chih-Hsuan and Leaman, Robert and Davis, Allan Peter and Mattingly, Carolyn and Wiegers, Thomas and Lu, Zhiyong},
    year = {2016},
    month = {05},
    pages = {baw068},
    title = {BioCreative V CDR task corpus: a resource for chemical disease relation extraction},
    volume = {2016},
    journal = {Database},
    doi = {10.1093/database/baw068}
}
"""


class BlurbConfig(datasets.BuilderConfig):
    """BuilderConfig for BLURB."""

    def __init__(self, task, data_url=None, citation=None, url=None,
                 label_classes=("False", "True"), **kwargs):
        """BuilderConfig for BLURB.

        Args:
          task: `string`, task the dataset is used for: 'ner', 'pico',
            'rel-ext', 'sent-sim', 'doc-clas', 'qa'.
          data_url: `string`, url to download the data files from.
          citation: `string`, citation for the dataset.
          url: `string`, url for information about the dataset.
          label_classes: `list[string]`, the list of classes for the label if
            the label is present as a string. Non-string labels will be cast
            to either 'False' or 'True'. The feature dict is derived from
            `task` and `label_classes`.
          **kwargs: keyword arguments forwarded to super.
        """
        # `version` is passed in through **kwargs (see BUILDER_CONFIGS);
        # passing it explicitly here as well would raise a TypeError for a
        # duplicate keyword argument.
        super(BlurbConfig, self).__init__(**kwargs)
        self.task = task
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url
        if self.task == 'ner':
            self.features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(names=self.label_classes)
                    ),
                }
            )
            self.base_url = f"{BASE_URL_NER}{self.name}/"
            self.urls = {
                "train": f"{self.base_url}train.tsv",
                "validation": f"{self.base_url}devel.tsv",
                "test": f"{self.base_url}test.tsv",
            }
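            # Illustrative only: with the 'BC5CDR-chem-IOB' config defined
            # below, `self.name` is the config name, so `self.urls` resolves to
            #   {BASE_URL_NER}BC5CDR-chem-IOB/train.tsv
            #   {BASE_URL_NER}BC5CDR-chem-IOB/devel.tsv
            #   {BASE_URL_NER}BC5CDR-chem-IOB/test.tsv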


class Blurb(datasets.GeneratorBasedBuilder):
    """Builder for BLURB, the Biomedical Language Understanding and Reasoning Benchmark."""

    VERSION = datasets.Version(_VERSION)

    BUILDER_CONFIGS = [
        BlurbConfig(name='BC5CDR-chem-IOB', task='ner',
                    label_classes=['O', 'B-Chemical', 'I-Chemical'],
                    version=datasets.Version(_VERSION),
                    description='BC5-CHEM: chemical NER on the BioCreative V CDR corpus',
                    citation=CITATION_BC5_CHEM),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            # The feature schema is defined per config in BlurbConfig, so it
            # is reused here rather than duplicated inline.
            features=self.config.features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        if self.config.task == 'ner':
            # Downloading a dict of URLs returns a dict of local file paths
            # keyed the same way ("train" / "validation" / "test").
            downloaded_files = dl_manager.download_and_extract(self.config.urls)
            return self._ner_split_generator(downloaded_files)

    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        if self.config.task == 'ner':
            # `yield from` is required here: without it the task-specific
            # generator would be created but never consumed, and no examples
            # would be produced.
            yield from self._ner_example_generator(filepath)

    def _ner_split_generator(self, downloaded_files):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                    gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION,
                                    gen_kwargs={"filepath": downloaded_files["validation"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST,
                                    gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _ner_example_generator(self, filepath):
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            for line in f:
                if line == "" or line == "\n":
                    # A blank line marks the end of a sentence.
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        ner_tags = []
                else:
                    # Token and IOB tag are tab-separated.
                    splits = line.split("\t")
                    tokens.append(splits[0])
                    ner_tags.append(splits[1].rstrip())
            # Yield the last sentence, guarding against an empty example when
            # the file already ends with a blank line.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }
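
For reference, a minimal usage sketch (not part of the commit): it assumes the script above is saved locally as BLURB.py, since the Hub repository id is not shown here, and its comments illustrate the tab-separated IOB format the NER generator expects.

# Usage sketch (assumption: BLURB.py is in the working directory; the Hub
# repository id is not part of this commit, so a local script path is used).
#
# The NER generator consumes "token<TAB>tag" lines with a blank line between
# sentences, e.g. (illustrative sample):
#
#   Naloxone	B-Chemical
#   reverses	O
#   the	O
#   effect	O
#   of	O
#   clonidine	B-Chemical
#   .	O
from datasets import load_dataset

dataset = load_dataset("./BLURB.py", name="BC5CDR-chem-IOB")

example = dataset["train"][0]
print(example["tokens"])    # token strings
print(example["ner_tags"])  # integer ids for O / B-Chemical / I-Chemical

# Map ClassLabel ids back to tag strings via the feature metadata:
labels = dataset["train"].features["ner_tags"].feature.names
print([labels[i] for i in example["ner_tags"]])

Recent versions of the datasets library may additionally require trust_remote_code=True when loading script-based datasets such as this one.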