"""Dexter Dataset"""

from typing import List
from functools import partial

import datasets

import pandas


VERSION = datasets.Version("1.0.0")

# Per-feature value-encoding dictionaries ({feature name: {raw value: encoded value}}).
# Dexter has only numeric features, so no encodings are needed and this stays empty.
_ENCODING_DICS = {
}

DESCRIPTION = "Dexter: a text-categorization dataset with 20,000 numeric features for binary classification, from the UCI Machine Learning Repository."
_HOMEPAGE = "https://archive-beta.ics.uci.edu/dataset/168/dexter"
_URLS = "https://archive-beta.ics.uci.edu/dataset/168/dexter"
_CITATION = """
@misc{misc_dexter_168,
  author       = {Guyon, Isabelle and Gunn, Steve and Ben-Hur, Asa and Dror, Gideon},
  title        = {{Dexter}},
  year         = {2008},
  howpublished = {UCI Machine Learning Repository},
  note         = {{DOI}: \\url{10.24432/C5P898}}
}
"""

# Dataset info
urls_per_split = {
	"train": "https://huggingface.co/datasets/mstz/dexter/resolve/main/dexter.data"
}
# Feature schema: 20,000 numeric features plus a binary class label.
features_types_per_config = {
	"dexter": {f"feature_{i}": datasets.Value("float64") for i in range(20000)}
}
features_types_per_config["dexter"]["class"] = datasets.ClassLabel(num_classes=2)
features_per_config = {k: datasets.Features(features_types_per_config[k]) for k in features_types_per_config}


class DexterConfig(datasets.BuilderConfig):
	"""BuilderConfig for the Dexter dataset."""

	def __init__(self, **kwargs):
		super(DexterConfig, self).__init__(version=VERSION, **kwargs)
		self.features = features_per_config[kwargs["name"]]


class Dexter(datasets.GeneratorBasedBuilder):
	"""Dexter dataset builder."""

	# available dataset configurations
	DEFAULT_CONFIG_NAME = "dexter"
	BUILDER_CONFIGS = [DexterConfig(name="dexter", description="Dexter for binary classification.")]

	def _info(self):
		info = datasets.DatasetInfo(description=DESCRIPTION, citation=_CITATION, homepage=_HOMEPAGE,
									features=features_per_config[self.config.name])

		return info
	
	def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
		downloads = dl_manager.download_and_extract(urls_per_split)

		return [
			datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloads["train"]}),
		]
	
	def _generate_examples(self, filepath: str):
		data = pandas.read_csv(filepath)
		data = self.preprocess(data)

		for row_id, row in data.iterrows():
			data_row = dict(row)

			yield row_id, data_row

	def preprocess(self, data: pandas.DataFrame) -> pandas.DataFrame:
		# Rename columns to match the declared schema: 20,000 features followed by the class label.
		data.columns = [f"feature_{i}" for i in range(20000)] + ["class"]

		# Apply value encodings to any categorical features (none for Dexter, so this is a no-op).
		for feature in _ENCODING_DICS:
			encoding_function = partial(self.encode, feature)
			data.loc[:, feature] = data[feature].apply(encoding_function)

		return data[list(features_types_per_config[self.config.name].keys())]

	def encode(self, feature, value):
		if feature in _ENCODING_DICS:
			return _ENCODING_DICS[feature][value]
		raise ValueError(f"Unknown feature: {feature}")
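

# A minimal usage sketch (assuming this script is published on the Hugging Face Hub
# as the "mstz/dexter" dataset repository, as the URL in `urls_per_split` suggests).
# It shows how the "dexter" configuration defined above could be loaded:
#
#     from datasets import load_dataset
#
#     dexter = load_dataset("mstz/dexter", "dexter", split="train")
#     print(dexter.features["class"])  # ClassLabel(num_classes=2)
#     print(dexter[0]["class"])        # 0 or 1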