mstz commited on
Commit
270281d
1 Parent(s): f5cd965

Upload 3 files

Browse files
Files changed (3) hide show
  1. README.md +28 -1
  2. phoneme.csv +0 -0
  3. phoneme.py +68 -0
README.md CHANGED
@@ -1,3 +1,30 @@
1
  ---
2
- license: cc-by-4.0
 
 
 
 
 
 
 
 
 
 
 
 
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ language:
3
+ - en
4
+ tags:
5
+ - phoneme
6
+ - tabular_classification
7
+ - binary_classification
8
+ pretty_name: Phoneme
9
+ size_categories:
10
+ - 10K<n<100K
11
+ task_categories: # Full list at https://github.com/huggingface/hub-docs/blob/main/js/src/lib/interfaces/Types.ts
12
+ - tabular-classification
13
+ configs:
14
+ - phoneme
15
  ---
16
+ # Phoneme
17
+ The [Phoneme dataset](https://www.openml.org/search?type=data&sort=runs&id=1489&status=active) from the [OpenML repository](https://www.openml.org/).
18
+
19
+ # Configurations and tasks
20
+ | **Configuration** | **Task** |
21
+ |-------------------|---------------------------|
22
+ | phoneme | Binary classification |
23
+
24
+
25
+ # Usage
26
+ ```python
27
+ from datasets import load_dataset
28
+
29
+ dataset = load_dataset("mstz/phoneme")["train"]
30
+ ```
phoneme.csv ADDED
The diff for this file is too large to render. See raw diff
 
phoneme.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List
2
+
3
+ import datasets
4
+
5
+ import pandas
6
+
7
+
8
VERSION = datasets.Version("1.0.0")

DESCRIPTION = "Phoneme dataset from the OpenML repository."
# NOTE(review): the README in this same commit links OpenML id=1489 for Phoneme,
# but the original code pointed at id=722; aligned both URLs to 1489 for consistency.
_HOMEPAGE = "https://www.openml.org/search?type=data&sort=runs&id=1489&status=active"
# `_URLS` is plural and was written as `("...")`, which is just a parenthesized
# string; made it a real one-element tuple (it is not referenced elsewhere in
# this file, so the type change is safe).
_URLS = ("https://www.openml.org/search?type=data&sort=runs&id=1489&status=active",)
_CITATION = """"""

# Dataset info: download location for each split (the CSV is hosted in this
# dataset repository on the Hub).
urls_per_split = {
    "train": "https://huggingface.co/datasets/mstz/phoneme/raw/main/phoneme.csv"
}

# Feature schema per configuration; column names match the CSV header.
features_types_per_config = {
    "phoneme": {
        "V1": datasets.Value("float64"),
        "V2": datasets.Value("float64"),
        "V3": datasets.Value("float64"),
        "V4": datasets.Value("float64"),
        "V5": datasets.Value("float64"),
        # Binary target: "no" -> 0, "yes" -> 1.
        "class": datasets.ClassLabel(num_classes=2, names=("no", "yes"))
    }
}
features_per_config = {k: datasets.Features(features_types_per_config[k]) for k in features_types_per_config}
33
+
34
class PhonemeConfig(datasets.BuilderConfig):
    """Builder configuration for the Phoneme dataset."""

    def __init__(self, **kwargs):
        # Pin the builder version; every other keyword is forwarded unchanged.
        super().__init__(version=VERSION, **kwargs)
        # Attach the feature schema matching this configuration's name
        # (raises KeyError for an unknown configuration, as before).
        self.features = features_per_config[kwargs["name"]]
39
+
40
class Phoneme(datasets.GeneratorBasedBuilder):
    """Hugging Face dataset builder for the OpenML Phoneme dataset.

    Exposes a single configuration, "phoneme", as one "train" split read
    from a CSV hosted in this dataset repository.
    """

    # `DEFAULT_CONFIG_NAME` is the attribute the `datasets` builder machinery
    # actually reads when the caller does not name a configuration; the
    # original `DEFAULT_CONFIG` is not recognized by the library. Kept for
    # backward compatibility with any code that inspected it.
    DEFAULT_CONFIG = "phoneme"
    DEFAULT_CONFIG_NAME = "phoneme"
    BUILDER_CONFIGS = [
        PhonemeConfig(name="phoneme",
                      description="Phoneme for binary classification.")
    ]

    def _info(self):
        """Return dataset metadata, including the per-config feature schema."""
        info = datasets.DatasetInfo(description=DESCRIPTION, citation=_CITATION, homepage=_HOMEPAGE,
                                    features=features_per_config[self.config.name])

        return info

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the CSV and expose it as a single TRAIN split."""
        downloads = dl_manager.download_and_extract(urls_per_split)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloads["train"]})
        ]

    def _generate_examples(self, filepath: str):
        """Yield (row_id, example) pairs read from the downloaded CSV.

        Each example is a plain dict keyed by the CSV column names, which
        must match the feature schema declared in `_info`.
        """
        data = pandas.read_csv(filepath)

        for row_id, row in data.iterrows():
            data_row = dict(row)

            yield row_id, data_row