Modalities: Text
Languages: English
Size: < 1K
Libraries: Datasets
gabrielaltay committed
Commit 89baa2b · 1 parent: bec61ed

upload hubscripts/mayosrs_hub.py to hub from bigbio repo

Files changed (1)
  1. mayosrs.py +162 -0
mayosrs.py ADDED
@@ -0,0 +1,162 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+MayoSRS consists of 101 clinical term pairs whose relatedness was determined by
+nine medical coders and three physicians from the Mayo Clinic.
+"""
+
+from typing import Dict, List, Tuple
+
+import datasets
+import pandas as pd
+
+from .bigbiohub import pairs_features
+from .bigbiohub import BigBioConfig
+from .bigbiohub import Tasks
+
+_LANGUAGES = ['English']
+_PUBMED = False
+_LOCAL = False
+_CITATION = """\
+@article{pedersen2007measures,
+  title={Measures of semantic similarity and relatedness in the biomedical domain},
+  author={Pedersen, Ted and Pakhomov, Serguei VS and Patwardhan, Siddharth and Chute, Christopher G},
+  journal={Journal of biomedical informatics},
+  volume={40},
+  number={3},
+  pages={288--299},
+  year={2007},
+  publisher={Elsevier}
+}
+"""
+
+_DATASETNAME = "mayosrs"
+_DISPLAYNAME = "MayoSRS"
+
+_DESCRIPTION = """\
+MayoSRS consists of 101 clinical term pairs whose relatedness was determined by \
+nine medical coders and three physicians from the Mayo Clinic.
+"""
+
+_HOMEPAGE = "https://conservancy.umn.edu/handle/11299/196265"
+
+_LICENSE = 'Creative Commons Zero v1.0 Universal'
+
+_URLS = {
+    _DATASETNAME: "https://conservancy.umn.edu/bitstream/handle/11299/196265/MayoSRS.csv?sequence=1&isAllowed=y"
+}
+
+_SUPPORTED_TASKS = [Tasks.SEMANTIC_SIMILARITY]
+
+_SOURCE_VERSION = "1.0.0"
+_BIGBIO_VERSION = "1.0.0"
+
+
+class MayosrsDataset(datasets.GeneratorBasedBuilder):
+    """MayoSRS consists of 101 clinical term pairs whose relatedness was
+    determined by nine medical coders and three physicians from the Mayo Clinic."""
+
+    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+    BUILDER_CONFIGS = [
+        BigBioConfig(
+            name="mayosrs_source",
+            version=SOURCE_VERSION,
+            description="MayoSRS source schema",
+            schema="source",
+            subset_id="mayosrs",
+        ),
+        BigBioConfig(
+            name="mayosrs_bigbio_pairs",
+            version=BIGBIO_VERSION,
+            description="MayoSRS BigBio schema",
+            schema="bigbio_pairs",
+            subset_id="mayosrs",
+        ),
+    ]
+
+    DEFAULT_CONFIG_NAME = "mayosrs_source"
+
+    def _info(self) -> datasets.DatasetInfo:
+
+        if self.config.schema == "source":
+            features = datasets.Features(
+                {
+                    "text_1": datasets.Value("string"),
+                    "text_2": datasets.Value("string"),
+                    "label": datasets.Value("float32"),
+                    "code_1": datasets.Value("string"),
+                    "code_2": datasets.Value("string"),
+                }
+            )
+
+        elif self.config.schema == "bigbio_pairs":
+            features = pairs_features
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=str(_LICENSE),
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+        """Returns SplitGenerators."""
+
+        urls = _URLS[_DATASETNAME]
+        filepath = dl_manager.download_and_extract(urls)
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "filepath": filepath,
+                    "split": "train",
+                },
+            )
+        ]
+
+    def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
+        """Yields examples as (key, example) tuples."""
+
+        if split == "train":
+
+            data = pd.read_csv(
+                filepath,
+                sep=",",
+                header=0,
+                names=["label", "code_1", "code_2", "text_1", "text_2"],
+            )
+
+            if self.config.schema == "source":
+                for id_, row in data.iterrows():
+                    yield id_, row.to_dict()
+
+            elif self.config.schema == "bigbio_pairs":
+                for id_, row in data.iterrows():
+                    yield id_, {
+                        "id": id_,  # unique identifier for each record
+                        "document_id": id_,
+                        "text_1": row["text_1"],
+                        "text_2": row["text_2"],
+                        "label": str(row["label"]),
+                    }
+
+        else:
+            print("There is no test/validation split available for this dataset")
+            return
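
For reference, a minimal usage sketch of the two configs defined above, assuming the script is published as the bigbio/mayosrs dataset repo on the Hugging Face Hub; the repo path, the trust_remote_code note, and the printed fields are illustrative assumptions, not part of this commit.

# Minimal usage sketch -- the "bigbio/mayosrs" path is an assumption; adjust it
# to wherever this script is hosted. Recent releases of the datasets library may
# also require trust_remote_code=True for script-based loaders.
from datasets import load_dataset

# Source schema: raw MayoSRS.csv columns (label, code_1, code_2, text_1, text_2).
source = load_dataset("bigbio/mayosrs", name="mayosrs_source", split="train")

# BigBio pairs schema: id, document_id, text_1, text_2, label (label serialized as a string).
pairs = load_dataset("bigbio/mayosrs", name="mayosrs_bigbio_pairs", split="train")

print(len(pairs))  # 101 clinical term pairs
print(pairs[0])    # first pair with its relatedness score (label)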