gabrielaltay committed on
Commit
b93e697
1 Parent(s): fa9d5ab

upload hubscripts/twadrl_hub.py to hub from bigbio repo

Browse files
Files changed (1):
  1. twadrl.py (+167, −0)
twadrl.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import glob
17
+ import os
18
+ import re
19
+
20
+ import datasets
21
+
22
+ from .bigbiohub import kb_features
23
+ from .bigbiohub import BigBioConfig
24
+ from .bigbiohub import Tasks
25
+
26
# --- Dataset identity, used by the BigBio loader infrastructure -------------
_DATASETNAME = "twadrl"
_DISPLAYNAME = "TwADR-L"

# Metadata flags consumed by BigBio tooling.
_LANGUAGES = ['English']
_PUBMED = False  # corpus is social-media text, not PubMed abstracts
_LOCAL = False  # data is fetched from a public URL; no local files required

# BibTeX entry for the publication introducing the dataset (ACL 2016).
_CITATION = """
@inproceedings{limsopatham-collier-2016-normalising,
    title = "Normalising Medical Concepts in Social Media Texts by Learning Semantic Representation",
    author = "Limsopatham, Nut and
      Collier, Nigel",
    booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = aug,
    year = "2016",
    address = "Berlin, Germany",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/P16-1096",
    doi = "10.18653/v1/P16-1096",
    pages = "1014--1023",
}
"""

# Short human-readable description; the trailing backslashes continue the
# string without embedding newlines.
_DESCRIPTION = """
The TwADR-L dataset contains medical concepts written on social media (Twitter) \
mapped to how they are formally written in medical ontologies (SIDER 4). \
"""

_HOMEPAGE = "https://zenodo.org/record/55013"

_LICENSE = 'Creative Commons Attribution 4.0 International'

# Single zip archive containing every fold of the dataset.
_URLs = "https://zenodo.org/record/55013/files/datasets.zip"

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.NAMED_ENTITY_DISAMBIGUATION]
_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"
61
+
62
+
63
class TwADRL(datasets.GeneratorBasedBuilder):
    """TwADR-L: Dataset for Normalising Medical Concepts on Twitter.

    Each raw file is a tab-separated list of
    ``cui<TAB>medical_concept<TAB>social_media_text`` triples, shipped as
    k cross-validation folds. Examples are exposed either in the raw
    "source" schema or in the BigBio knowledge-base ("bigbio_kb") schema.
    """

    DEFAULT_CONFIG_NAME = "twadrl_source"
    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="twadrl_source",
            version=SOURCE_VERSION,
            description="TwADR-L source schema",
            schema="source",
            subset_id="twadrl",
        ),
        BigBioConfig(
            name="twadrl_bigbio_kb",
            version=BIGBIO_VERSION,
            description="TwADR-L simplified BigBio schema",
            schema="bigbio_kb",
            subset_id="twadrl",
        ),
    ]

    def _info(self):
        """Return the DatasetInfo matching the active config's schema."""
        if self.config.schema == "source":
            # Source schema mirrors the three tab-separated columns.
            features = datasets.Features(
                {
                    "cui": datasets.Value("string"),
                    "medical_concept": datasets.Value("string"),
                    "social_media_text": datasets.Value("string"),
                }
            )
        elif self.config.schema == "bigbio_kb":
            features = kb_features
        else:
            # Fail fast with a clear message instead of an UnboundLocalError
            # when DatasetInfo(features=features) is reached below.
            raise ValueError(f"Unsupported schema: {self.config.schema}")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and emit one SplitGenerator per fold file.

        The dataset supports k-fold cross-validation, so split names take the
        form ``{train|validation|test}_{fold_id}``.
        """
        dl_dir = dl_manager.download_and_extract(_URLs)
        dataset_dir = os.path.join(dl_dir, "datasets", "TwADR-L")
        # Raw string avoids the invalid "\." / "\d" escape sequences of the
        # original pattern (deprecated, a SyntaxError in future Pythons);
        # \d+ also accepts fold ids with more than one digit.
        fold_pattern = re.compile(r"TwADR-L\.fold-(\d+)\.")
        splits = []
        for split_name in [
            datasets.Split.TRAIN,
            datasets.Split.VALIDATION,
            datasets.Split.TEST,
        ]:
            # glob order is filesystem-dependent; sort for determinism.
            fold_filepaths = sorted(
                glob.glob(os.path.join(dataset_dir, f"TwADR-L.fold-*.{split_name}.txt"))
            )
            for fold_filepath in fold_filepaths:
                fold_id = fold_pattern.search(fold_filepath).group(1)
                split_id = f"{split_name}_{fold_id}"
                splits.append(
                    datasets.SplitGenerator(
                        name=split_id,
                        gen_kwargs={"filepath": fold_filepath, "split_id": split_id},
                    )
                )
        return splits

    def _generate_examples(self, filepath, split_id):
        """Yield (key, example) pairs from one fold file.

        Files are opened as latin-1, matching the upstream distribution
        (presumably because the raw tweets are not valid UTF-8 — NOTE(review):
        confirm against the Zenodo archive).
        """
        with open(filepath, "r", encoding="latin-1") as f:
            for line_no, raw_line in enumerate(f):
                stripped = raw_line.strip()
                if not stripped:
                    # Tolerate blank lines (e.g. a trailing newline at EOF),
                    # which would otherwise break the 3-way unpack below.
                    continue
                # `example_id` replaces the original local `id`, which
                # shadowed the builtin.
                example_id = f"{split_id}_{line_no}"
                cui, medical_concept, social_media_text = stripped.split("\t")
                if self.config.schema == "source":
                    yield example_id, {
                        "cui": cui,
                        "medical_concept": medical_concept,
                        "social_media_text": social_media_text,
                    }
                elif self.config.schema == "bigbio_kb":
                    # The whole post is both the single passage and the single
                    # entity mention, normalized to its SIDER 4 concept id.
                    text_type = "social_media_text"
                    offset = (0, len(social_media_text))
                    yield example_id, {
                        "id": example_id,
                        "document_id": example_id,
                        "passages": [
                            {
                                "id": f"{example_id}_passage",
                                "type": text_type,
                                "text": [social_media_text],
                                "offsets": [offset],
                            }
                        ],
                        "entities": [
                            {
                                "id": f"{example_id}_entity",
                                "type": text_type,
                                "text": [social_media_text],
                                "offsets": [offset],
                                "normalized": [{"db_name": "SIDER 4", "db_id": cui}],
                            }
                        ],
                        "events": [],
                        "coreferences": [],
                        "relations": [],
                    }