gabrielaltay committed
Commit 16c691f
Parent(s): 71d9192

upload hubscripts/medical_data_hub.py to hub from bigbio repo

Files changed (1)
  1. medical_data.py +179 -0
medical_data.py ADDED
@@ -0,0 +1,179 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import pandas as pd
+
+ from .bigbiohub import entailment_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+
+ _LANGUAGES = ['English']
+ _PUBMED = False
+ _LOCAL = True
+ _CITATION = """\
+ @misc{ask9medicaldata,
+     author = {Khan, Arbaaz},
+     title = {Sentiment Analysis for Medical Drugs},
+     year = {2019},
+     url = {https://www.kaggle.com/datasets/arbazkhan971/analyticvidhyadatasetsentiment},
+ }
+ """
+
+ _DATASETNAME = "medical_data"
+ _DISPLAYNAME = "Medical Data"
+
+ _DESCRIPTION = """\
+ This dataset is designed for multiclass sentiment classification of comments about medical drugs.
+ """
+
+ _HOMEPAGE = ""
+
+ _LICENSE = 'License information unavailable'
+
+ _URLS = {}
+
+ _SUPPORTED_TASKS = [Tasks.TEXTUAL_ENTAILMENT]
+
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ class MedicalDataDataset(datasets.GeneratorBasedBuilder):
+     """Comments about patients, each labeled with the sentiment expressed
+     toward the specific drug mentioned in the comment:
+     0 - Neutral
+     1 - Negative sentiment
+     2 - Positive sentiment
+     """
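+
+     # Both Kaggle CSVs are expected to provide `unique_hash`, `text`, and
+     # `drug` columns; only the train CSV also provides `sentiment`
+     # (see _generate_examples below).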
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}",
+         ),
+         BigBioConfig(
+             name=f"{_DATASETNAME}_bigbio_te",
+             version=BIGBIO_VERSION,
+             description=f"{_DATASETNAME} BigBio schema",
+             schema="bigbio_te",
+             subset_id=f"{_DATASETNAME}",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+
+             features = datasets.Features(
+                 {
+                     "hash": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "drug_name": datasets.Value("string"),
+                     "sentiment": datasets.Value("string"),
+                 }
+             )
+
+         elif self.config.schema == "bigbio_te":
+             features = entailment_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         if self.config.data_dir is None:
+             raise ValueError(
+                 "This is a local dataset. Please pass the data_dir kwarg to load_dataset."
+             )
+         else:
+             data_dir = self.config.data_dir
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "train_F3WbcTw.csv"),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "test_tOlRoBf.csv"),
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         csv_reader = pd.read_csv(filepath, dtype="object")
+         # The test CSV ships without labels, so the sentiment/label field
+         # is None for the test split.
+         if self.config.schema == "source":
+             for _idx, line in csv_reader.iterrows():
+                 document = {
+                     "hash": line["unique_hash"],
+                     "text": line["text"],
+                     "drug_name": line["drug"],
+                     "sentiment": line["sentiment"] if split == "train" else None,
+                 }
+                 yield document["hash"], document
+
+         elif self.config.schema == "bigbio_te":
+             # BigBio TE view: the comment is the premise, the drug name the
+             # hypothesis, and the sentiment the label.
+             for _idx, line in csv_reader.iterrows():
+                 document = {
+                     "id": line["unique_hash"],
+                     "premise": line["text"],
+                     "hypothesis": line["drug"],
+                     "label": line["sentiment"] if split == "train" else None,
+                 }
+                 yield document["id"], document
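
For context, here is a minimal sketch of how this loader could be exercised locally (not part of the commit). It assumes the two Kaggle CSVs named in _split_generators have been downloaded into a local directory, and that bigbiohub.py sits next to medical_data.py as in other BigBio hub scripts; the ./medical_data directory path is a placeholder.

from datasets import load_dataset

# Hypothetical local layout: ./medical_data/ holds train_F3WbcTw.csv and
# test_tOlRoBf.csv, downloaded manually from Kaggle.
source = load_dataset(
    "medical_data.py",
    name="medical_data_source",
    data_dir="./medical_data",
)
print(source["train"][0])  # keys: hash, text, drug_name, sentiment

# Same files viewed through the BigBio textual-entailment schema.
te = load_dataset(
    "medical_data.py",
    name="medical_data_bigbio_te",
    data_dir="./medical_data",
)
print(te["test"][0])  # label is None for the unlabeled test split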