mstz committed
Commit 9cc10d5 · 1 Parent(s): dae29ab

updated to datasets 4.*

Files changed (5)
  1. README.md +17 -7
  2. has_hypo/train.csv +0 -0
  3. hypo.data +0 -0
  4. hypo.py +0 -168
  5. hypo/train.csv +0 -0
README.md CHANGED
@@ -1,15 +1,25 @@
 ---
-language:
-- en
+configs:
+- config_name: has_hypo
+  data_files:
+  - path: has_hypo/train.csv
+    split: train
+  default: true
+- config_name: hypo
+  data_files:
+  - path: hypo/train.csv
+    split: train
+  default: false
+language: en
+license: unknown
+pretty_name: Hypo
+size_categories: 1M<n<10M
 tags:
-- hypo
 - tabular_classification
 - binary_classification
-pretty_name: Hypo
-task_categories: # Full list at https://github.com/huggingface/hub-docs/blob/main/js/src/lib/interfaces/Types.ts
+- multiclass_classification
+task_categories:
 - tabular-classification
-configs:
-- hypo
 ---
 # Hypo
 The Hypo dataset.
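
With the loading script removed, `datasets` 4.x resolves the two configs directly from the YAML above. The sketch below is a minimal, untested example of loading the dataset after this commit; the config names come from the README, and the printed output is only indicative:

```python
# Minimal sketch (not part of the commit) of loading mstz/hypo after the
# migration to README-driven configs in datasets 4.x.
from datasets import get_dataset_config_names, load_dataset

# The README declares two configs, "has_hypo" (binary target, default: true)
# and "hypo" (multiclass target), each with a single "train" split.
print(get_dataset_config_names("mstz/hypo"))  # e.g. ["has_hypo", "hypo"]

has_hypo = load_dataset("mstz/hypo", "has_hypo", split="train")
hypo = load_dataset("mstz/hypo", "hypo", split="train")

# Because has_hypo is marked default in the YAML, omitting the config name
# should also return it: load_dataset("mstz/hypo", split="train").
print(has_hypo)
print(hypo.features)
```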
has_hypo/train.csv ADDED
The diff for this file is too large to render. See raw diff
 
hypo.data DELETED
The diff for this file is too large to render. See raw diff
 
hypo.py DELETED
@@ -1,168 +0,0 @@
-"""Hypo Dataset"""
-
-from typing import List
-from functools import partial
-
-import datasets
-
-import pandas
-
-
-VERSION = datasets.Version("1.0.0")
-
-_ENCODING_DICS = {
-    "class": {
-        "negative": 0,
-        "compensatedhypothyroid": 1,
-        "secondaryhypothyroid": 2,
-        "primaryhypothyroid": 3
-    }
-}
-
-DESCRIPTION = "Hypo dataset."
-_HOMEPAGE = ""
-_URLS = ("")
-_CITATION = """"""
-
-# Dataset info
-urls_per_split = {
-    "train": "https://huggingface.co/datasets/mstz/hypo/resolve/main/hypo.data"
-}
-features_types_per_config = {
-    "hypo": {
-        "age": datasets.Value("int64"),
-        "sex": datasets.Value("string"),
-        "on_thyroxine": datasets.Value("bool"),
-        "query_on_thyroxine": datasets.Value("bool"),
-        "on_antithyroid_medication": datasets.Value("bool"),
-        "sick": datasets.Value("bool"),
-        "pregnant": datasets.Value("bool"),
-        "thyroid_surgery": datasets.Value("bool"),
-        "I131_treatment": datasets.Value("bool"),
-        "query_hypothyroid": datasets.Value("bool"),
-        "query_hyperthyroid": datasets.Value("bool"),
-        "lithium": datasets.Value("bool"),
-        "goitre": datasets.Value("bool"),
-        "tumor": datasets.Value("bool"),
-        "hypopituitary": datasets.Value("bool"),
-        "psych": datasets.Value("bool"),
-        "TSH_measured": datasets.Value("bool"),
-        "TSH": datasets.Value("string"),
-        "T3_measured": datasets.Value("bool"),
-        "T3": datasets.Value("float64"),
-        "TT4_measured": datasets.Value("bool"),
-        "TT4": datasets.Value("float64"),
-        "T4U_measured": datasets.Value("bool"),
-        "T4U": datasets.Value("float64"),
-        "FTI_measured": datasets.Value("bool"),
-        "FTI": datasets.Value("float64"),
-        "TBG_measured": datasets.Value("string"),
-        "referral_source": datasets.Value("string"),
-        "class": datasets.ClassLabel(num_classes=4,
-                                     names=("negative", "compensated hypothyroid", "secondary hypothyroid", "primary hypothyroid"))
-    },
-    "has_hypo": {
-        "age": datasets.Value("int64"),
-        "sex": datasets.Value("string"),
-        "on_thyroxine": datasets.Value("bool"),
-        "query_on_thyroxine": datasets.Value("bool"),
-        "on_antithyroid_medication": datasets.Value("bool"),
-        "sick": datasets.Value("bool"),
-        "pregnant": datasets.Value("bool"),
-        "thyroid_surgery": datasets.Value("bool"),
-        "I131_treatment": datasets.Value("bool"),
-        "query_hypothyroid": datasets.Value("bool"),
-        "query_hyperthyroid": datasets.Value("bool"),
-        "lithium": datasets.Value("bool"),
-        "goitre": datasets.Value("bool"),
-        "tumor": datasets.Value("bool"),
-        "hypopituitary": datasets.Value("bool"),
-        "psych": datasets.Value("bool"),
-        "TSH_measured": datasets.Value("bool"),
-        "TSH": datasets.Value("string"),
-        "T3_measured": datasets.Value("bool"),
-        "T3": datasets.Value("string"),
-        "TT4_measured": datasets.Value("bool"),
-        "TT4": datasets.Value("float64"),
-        "T4U_measured": datasets.Value("bool"),
-        "T4U": datasets.Value("float64"),
-        "FTI_measured": datasets.Value("bool"),
-        "FTI": datasets.Value("float64"),
-        "TBG_measured": datasets.Value("string"),
-        "referral_source": datasets.Value("string"),
-        "class": datasets.ClassLabel(num_classes=2)
-    },
-}
-
-features_per_config = {k: datasets.Features(features_types_per_config[k]) for k in features_types_per_config}
-
-
-class HypoConfig(datasets.BuilderConfig):
-    def __init__(self, **kwargs):
-        super(HypoConfig, self).__init__(version=VERSION, **kwargs)
-        self.features = features_per_config[kwargs["name"]]
-
-
-class Hypo(datasets.GeneratorBasedBuilder):
-    # dataset versions
-    DEFAULT_CONFIG = "hypo"
-    BUILDER_CONFIGS = [
-        HypoConfig(name="hypo", description="Hypo for multiclass classification."),
-        HypoConfig(name="has_hypo", description="Hypo for binary classification."),
-    ]
-
-
-    def _info(self):
-        info = datasets.DatasetInfo(description=DESCRIPTION, citation=_CITATION, homepage=_HOMEPAGE,
-                                    features=features_per_config[self.config.name])
-
-        return info
-
-    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-        downloads = dl_manager.download_and_extract(urls_per_split)
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloads["train"]}),
-        ]
-
-    def _generate_examples(self, filepath: str):
-        data = pandas.read_csv(filepath)
-        data = self.preprocess(data)
-
-        for row_id, row in data.iterrows():
-            data_row = dict(row)
-
-            yield row_id, data_row
-
-    def preprocess(self, data: pandas.DataFrame) -> pandas.DataFrame:
-        data.drop("id", axis="columns", inplace=True)
-        data.drop("TBG", axis="columns", inplace=True)
-
-        data = data[data.age != "?"]
-        data = data[data.sex != "?"]
-        data = data[data.TSH != "?"]
-
-        data.loc[data.T3 == "?", "T3"] = -1
-        data.loc[data.TT4 == "?", "TT4"] = -1
-        data.loc[data.T4U == "?", "T4U"] = -1
-        data.loc[data.FTI == "?", "FTI"] = -1
-
-        data = data.infer_objects()
-
-        for feature in _ENCODING_DICS:
-            encoding_function = partial(self.encode, feature)
-            data[feature] = data[feature].apply(encoding_function)
-
-        if self.config.name == "has_hypo":
-            data["class"] = data["class"].apply(lambda x: 0 if x == 0 else 1)
-            print("has hypo\n\n\n")
-
-        print("classes")
-        print(data["class"].unique())
-
-        return data[list(features_types_per_config[self.config.name].keys())]
-
-    def encode(self, feature, value):
-        if feature in _ENCODING_DICS:
-            return _ENCODING_DICS[feature][value]
-        raise ValueError(f"Unknown feature: {feature}")
 
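The deleted script did its cleaning at generation time: dropping `id` and `TBG`, filtering rows where `age`, `sex` or `TSH` is the `?` placeholder, encoding the `class` labels, and binarising them for the `has_hypo` config. None of that runs automatically once loading goes through the CSVs. If the published `train.csv` files were exported without that cleaning, a rough pandas equivalent might look like the sketch below; the `preprocess` helper is hypothetical and not part of this repository:

```python
import pandas

# Hypothetical re-creation of the cleaning the deleted hypo.py performed;
# only needed if the published train.csv files still carry the raw values.
_CLASS_ENCODING = {
    "negative": 0,
    "compensatedhypothyroid": 1,
    "secondaryhypothyroid": 2,
    "primaryhypothyroid": 3,
}


def preprocess(data: pandas.DataFrame, binary: bool = False) -> pandas.DataFrame:
    # Drop the identifier and the always-missing TBG column, as the old script did.
    data = data.drop(columns=["id", "TBG"], errors="ignore")

    # Remove rows where key fields are the "?" missing-value placeholder.
    data = data[(data.age != "?") & (data.sex != "?") & (data.TSH != "?")]

    # Replace remaining "?" placeholders in the hormone measurements with -1.
    for column in ("T3", "TT4", "T4U", "FTI"):
        data.loc[data[column] == "?", column] = -1
    data = data.infer_objects()

    # Encode the multiclass target, then optionally collapse it to binary
    # (0 = negative, 1 = any hypothyroid class), mirroring the has_hypo config.
    data["class"] = data["class"].map(_CLASS_ENCODING)
    if binary:
        data["class"] = (data["class"] != 0).astype(int)
    return data
```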
hypo/train.csv ADDED
The diff for this file is too large to render. See raw diff
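
For quick inspection, the two added CSVs can also be read directly with pandas over the hub's `resolve/main` URLs. The paths are assumed from the `data_files` entries in README.md and the files are assumed to be plain CSV with a header row; this bypasses the `datasets` features and class-label mapping:

```python
import pandas

# Assumed raw-file URLs following the hub's resolve/main pattern and the
# data_files paths declared in README.md.
hypo_df = pandas.read_csv("https://huggingface.co/datasets/mstz/hypo/resolve/main/hypo/train.csv")
has_hypo_df = pandas.read_csv("https://huggingface.co/datasets/mstz/hypo/resolve/main/has_hypo/train.csv")

print(hypo_df.shape, has_hypo_df.shape)
print(hypo_df["class"].value_counts())
```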