gabrielaltay committed
Commit 3d90d4b
1 Parent(s): e5d30dc

initial commit

Files changed (3)
  1. README.md +35 -0
  2. bigbiohub.py +153 -0
  3. gad.py +211 -0
README.md CHANGED
@@ -1,3 +1,38 @@
  ---
+ language: en
  license: cc-by-4.0
+ multilinguality: monolingual
+ pretty_name: GAD
  ---
+
+
+ # Dataset Card for GAD
+
+ ## Dataset Description
+
+ - **Homepage:** https://github.com/dmis-lab/biobert (this data source is used by the BLURB benchmark)
+ - **Pubmed:** True
+ - **Public:** True
+ - **Tasks:** Text Classification
+
+
+ A corpus identifying associations between genes and diseases by a semi-automatic
+ annotation procedure based on the Genetic Association Database.
+
+
+ ## Citation Information
+
+ ```
+ @article{Bravo2015,
+   doi = {10.1186/s12859-015-0472-9},
+   url = {https://doi.org/10.1186/s12859-015-0472-9},
+   year = {2015},
+   month = feb,
+   publisher = {Springer Science and Business Media {LLC}},
+   volume = {16},
+   number = {1},
+   author = {{\`{A}}lex Bravo and Janet Pi{\~{n}}ero and N{\'{u}}ria Queralt-Rosinach and Michael Rautschka and Laura I Furlong},
+   title = {Extraction of relations between genes and diseases from text and large-scale data analysis: implications for translational research},
+   journal = {{BMC} Bioinformatics}
+ }
+ ```
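
The card above doesn't show how to load the data, so here is a minimal sketch of pulling one fold with the `datasets` library. The repository path is a placeholder; any of the config names defined in gad.py below will work:

```python
# Minimal sketch: load fold 0 in the source schema. "path/to/this/repo"
# is a placeholder for a local checkout (or Hub id) of this dataset repo.
from datasets import load_dataset

ds = load_dataset("path/to/this/repo", name="gad_fold0_source")
print(ds["train"][0])  # {'index': ..., 'sentence': ..., 'label': 0 or 1}
```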
bigbiohub.py ADDED
@@ -0,0 +1,153 @@
+ from dataclasses import dataclass
+ from enum import Enum
+ from types import SimpleNamespace
+
+ import datasets
+
+
+ BigBioValues = SimpleNamespace(NULL="<BB_NULL_STR>")
+
+
+ @dataclass
+ class BigBioConfig(datasets.BuilderConfig):
+     """BuilderConfig for BigBio."""
+
+     name: str = None
+     version: datasets.Version = None
+     description: str = None
+     schema: str = None
+     subset_id: str = None
+
+
+ class Tasks(Enum):
+     NAMED_ENTITY_RECOGNITION = "NER"
+     NAMED_ENTITY_DISAMBIGUATION = "NED"
+     EVENT_EXTRACTION = "EE"
+     RELATION_EXTRACTION = "RE"
+     COREFERENCE_RESOLUTION = "COREF"
+     QUESTION_ANSWERING = "QA"
+     TEXTUAL_ENTAILMENT = "TE"
+     SEMANTIC_SIMILARITY = "STS"
+     TEXT_PAIRS_CLASSIFICATION = "TXT2CLASS"
+     PARAPHRASING = "PARA"
+     TRANSLATION = "TRANSL"
+     SUMMARIZATION = "SUM"
+     TEXT_CLASSIFICATION = "TXTCLASS"
+
+
+ entailment_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "premise": datasets.Value("string"),
+         "hypothesis": datasets.Value("string"),
+         "label": datasets.Value("string"),
+     }
+ )
+
+ pairs_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "text_1": datasets.Value("string"),
+         "text_2": datasets.Value("string"),
+         "label": datasets.Value("string"),
+     }
+ )
+
+ qa_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "question_id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "question": datasets.Value("string"),
+         "type": datasets.Value("string"),
+         "choices": [datasets.Value("string")],
+         "context": datasets.Value("string"),
+         "answer": datasets.Sequence(datasets.Value("string")),
+     }
+ )
+
+ text_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "text": datasets.Value("string"),
+         "labels": [datasets.Value("string")],
+     }
+ )
+
+ text2text_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "text_1": datasets.Value("string"),
+         "text_2": datasets.Value("string"),
+         "text_1_name": datasets.Value("string"),
+         "text_2_name": datasets.Value("string"),
+     }
+ )
+
+ kb_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "passages": [
+             {
+                 "id": datasets.Value("string"),
+                 "type": datasets.Value("string"),
+                 "text": datasets.Sequence(datasets.Value("string")),
+                 "offsets": datasets.Sequence([datasets.Value("int32")]),
+             }
+         ],
+         "entities": [
+             {
+                 "id": datasets.Value("string"),
+                 "type": datasets.Value("string"),
+                 "text": datasets.Sequence(datasets.Value("string")),
+                 "offsets": datasets.Sequence([datasets.Value("int32")]),
+                 "normalized": [
+                     {
+                         "db_name": datasets.Value("string"),
+                         "db_id": datasets.Value("string"),
+                     }
+                 ],
+             }
+         ],
+         "events": [
+             {
+                 "id": datasets.Value("string"),
+                 "type": datasets.Value("string"),
+                 # refers to the text_bound_annotation of the trigger
+                 "trigger": {
+                     "text": datasets.Sequence(datasets.Value("string")),
+                     "offsets": datasets.Sequence([datasets.Value("int32")]),
+                 },
+                 "arguments": [
+                     {
+                         "role": datasets.Value("string"),
+                         "ref_id": datasets.Value("string"),
+                     }
+                 ],
+             }
+         ],
+         "coreferences": [
+             {
+                 "id": datasets.Value("string"),
+                 "entity_ids": datasets.Sequence(datasets.Value("string")),
+             }
+         ],
+         "relations": [
+             {
+                 "id": datasets.Value("string"),
+                 "type": datasets.Value("string"),
+                 "arg1_id": datasets.Value("string"),
+                 "arg2_id": datasets.Value("string"),
+                 "normalized": [
+                     {
+                         "db_name": datasets.Value("string"),
+                         "db_id": datasets.Value("string"),
+                     }
+                 ],
+             }
+         ],
+     }
+ )
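
Of the schemas above, `text_features` is the one the GAD loader uses for its `bigbio_text` configs. As a small sketch (not part of the commit), a conforming example can be checked by building a one-row `datasets.Dataset` against that schema; the field values here are made up:

```python
# Sketch: validate a hand-written example against the bigbio_text schema.
# Assumes bigbiohub.py is importable from the working directory.
import datasets
from bigbiohub import text_features

example = {
    "id": "0",
    "document_id": "12345",          # hypothetical source-document id
    "text": "Example gene-disease sentence.",
    "labels": ["1"],                 # GAD uses binary string labels
}
ds = datasets.Dataset.from_list([example], features=text_features)
print(ds[0])
```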
gad.py ADDED
@@ -0,0 +1,211 @@
+ from pathlib import Path
+ from typing import List
+
+ import datasets
+ import pandas as pd
+
+ from .bigbiohub import text_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+
+
+ _SOURCE_VIEW_NAME = "source"
+ _UNIFIED_VIEW_NAME = "bigbio"
+
+ _LANGUAGES = ["English"]
+ _PUBMED = True
+ _LOCAL = False
+ _CITATION = """\
+ @article{Bravo2015,
+   doi = {10.1186/s12859-015-0472-9},
+   url = {https://doi.org/10.1186/s12859-015-0472-9},
+   year = {2015},
+   month = feb,
+   publisher = {Springer Science and Business Media {LLC}},
+   volume = {16},
+   number = {1},
+   author = {{\`{A}}lex Bravo and Janet Pi{\~{n}}ero and N{\'{u}}ria Queralt-Rosinach and Michael Rautschka and Laura I Furlong},
+   title = {Extraction of relations between genes and diseases from text and large-scale data analysis: implications for translational research},
+   journal = {{BMC} Bioinformatics}
+ }
+ """
+
+ _DESCRIPTION = """\
+ A corpus identifying associations between genes and diseases by a semi-automatic
+ annotation procedure based on the Genetic Association Database.
+ """
+
+ _DATASETNAME = "gad"
+ _DISPLAYNAME = "GAD"
+
+ _HOMEPAGE = "https://github.com/dmis-lab/biobert"  # this data source is used by the BLURB benchmark
+
+ _LICENSE = "CC_BY_4p0"
+
+ _URLs = {
+     "source": "https://drive.google.com/uc?export=download&id=1-jDKGcXREb2X9xTFnuiJ36PvsqoyHWcw",
+     "bigbio_text": "https://drive.google.com/uc?export=download&id=1-jDKGcXREb2X9xTFnuiJ36PvsqoyHWcw",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.TEXT_CLASSIFICATION]
+
+ _SOURCE_VERSION = "1.0.0"
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ class GAD(datasets.GeneratorBasedBuilder):
+     """GAD is a weakly labeled dataset for the relation extraction (RE) task, treated here as sentence classification."""
+
+     BUILDER_CONFIGS = [
+         # 10-fold source schema
+         BigBioConfig(
+             name=f"gad_fold{i}_source",
+             version=datasets.Version(_SOURCE_VERSION),
+             description="GAD source schema",
+             schema="source",
+             subset_id=f"gad_fold{i}",
+         )
+         for i in range(10)
+     ] + [
+         # 10-fold bigbio schema
+         BigBioConfig(
+             name=f"gad_fold{i}_bigbio_text",
+             version=datasets.Version(_BIGBIO_VERSION),
+             description="GAD BigBio schema",
+             schema="bigbio_text",
+             subset_id=f"gad_fold{i}",
+         )
+         for i in range(10)
+     ]
+
+     # BLURB benchmark config https://microsoft.github.io/BLURB/
+     BUILDER_CONFIGS.append(
+         BigBioConfig(
+             name="gad_blurb_bigbio_text",
+             version=datasets.Version(_BIGBIO_VERSION),
+             description="GAD BLURB benchmark in simplified BigBio schema",
+             schema="bigbio_text",
+             subset_id="gad_blurb",
+         )
+     )
+
+     DEFAULT_CONFIG_NAME = "gad_fold0_source"
+
+     def _info(self):
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "index": datasets.Value("string"),
+                     "sentence": datasets.Value("string"),
+                     "label": datasets.Value("int32"),
+                 }
+             )
+         elif self.config.schema == "bigbio_text":
+             features = text_features
+         else:
+             raise ValueError(f"Invalid schema: {self.config.schema}")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(
+         self, dl_manager: datasets.DownloadManager
+     ) -> List[datasets.SplitGenerator]:
+
+         if "blurb" in self.config.name:
+             return self._blurb_split_generator(dl_manager)
+
+         # config names use folds 0-9, but the extracted folders are named 1-10
+         fold_id = int(self.config.subset_id.split("_fold")[1][0]) + 1
+
+         my_urls = _URLs[self.config.schema]
+         data_dir = Path(dl_manager.download_and_extract(my_urls))
+         data_files = {
+             "train": data_dir / "GAD" / str(fold_id) / "train.tsv",
+             "test": data_dir / "GAD" / str(fold_id) / "test.tsv",
+         }
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": data_files["train"]},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepath": data_files["test"]},
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path):
+         # the raw fold-level train.tsv files have no header row; test.tsv
+         # (and the regenerated BLURB files) do
+         if "train.tsv" in str(filepath):
+             df = pd.read_csv(filepath, sep="\t", header=None).reset_index()
+         else:
+             df = pd.read_csv(filepath, sep="\t")
+         df.columns = ["id", "sentence", "label"]
+
+         if self.config.schema == "source":
+             for idx, row in enumerate(df.itertuples()):
+                 ex = {
+                     "index": str(row.id),  # schema declares a string
+                     "sentence": row.sentence,
+                     "label": int(row.label),
+                 }
+                 yield idx, ex
+         elif self.config.schema == "bigbio_text":
+             for idx, row in enumerate(df.itertuples()):
+                 ex = {
+                     "id": str(idx),
+                     "document_id": str(row.id),
+                     "text": row.sentence,
+                     "labels": [str(row.label)],
+                 }
+                 yield idx, ex
+         else:
+             raise ValueError(f"Invalid config: {self.config.name}")
+
+     def _blurb_split_generator(self, dl_manager: datasets.DownloadManager):
+         """Creates the train/dev/test splits for the BLURB benchmark."""
+
+         my_urls = _URLs[self.config.schema]
+         data_dir = Path(dl_manager.download_and_extract(my_urls))
+         data_files = {
+             "train": data_dir / "GAD" / str(1) / "train.tsv",
+             "test": data_dir / "GAD" / str(1) / "test.tsv",
+         }
+
+         root_path = data_files["train"].parents[1]  # .../GAD
+         # split fold 1's training file into BLURB train + dev sets
+         with open(data_files["train"], "r") as f:
+             train_data = f.readlines()
+
+         data = {}
+         data["train"], data["dev"] = train_data[:4261], train_data[4261:]
+
+         for batch in ["train", "dev"]:
+             fname = batch + "_blurb.tsv"
+             fname = root_path / fname
+
+             with open(fname, "w") as f:
+                 f.write("index\tsentence\tlabel\n")
+                 for idx, line in enumerate(data[batch]):
+                     f.write(f"{idx}\t{line}")
+
+         train_fpath = root_path / "train_blurb.tsv"
+         dev_fpath = root_path / "dev_blurb.tsv"
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": train_fpath},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepath": dev_fpath},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepath": data_files["test"]},
+             ),
+         ]
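
Taken together, the loader registers 21 configs: ten `*_source` folds, ten `*_bigbio_text` folds, and the BLURB config, which is the only one that also yields a validation split. A hedged sketch of comparing them, again with a placeholder path:

```python
# Sketch: compare the splits produced by a fold config vs. the BLURB config.
# "path/to/gad.py" is a placeholder for a local copy of the loader script.
from datasets import load_dataset

for name in ["gad_fold0_source", "gad_blurb_bigbio_text"]:
    ds = load_dataset("path/to/gad.py", name=name)
    print(name, {split: ds[split].num_rows for split in ds})
# fold configs yield train/test; the BLURB config yields train/validation/test
```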