mstz committed
Commit 36403f4
1 Parent(s): b7b5a97

Upload 3 files

Files changed (3):
  1. README.md +28 -1
  2. spambase.data +0 -0
  3. spambase.py +127 -0
README.md CHANGED
@@ -1,3 +1,30 @@
  ---
- license: cc-by-4.0
+ language:
+ - en
+ tags:
+ - spambase
+ - tabular_classification
+ - binary_classification
+ pretty_name: Spambase
+ size_categories:
+ - 1K<n<10K
+ task_categories: # Full list at https://github.com/huggingface/hub-docs/blob/main/js/src/lib/interfaces/Types.ts
+ - tabular-classification
+ configs:
+ - spambase
  ---
+ # Spambase
+ The [Spambase dataset](https://archive.ics.uci.edu/ml/datasets/Spambase) from the [UCI ML repository](https://archive.ics.uci.edu/ml/datasets).
+
+ # Configurations and tasks
+ | **Configuration** | **Task**               | **Description**   |
+ |-------------------|------------------------|-------------------|
+ | spambase          | Binary classification  | Is the mail spam? |
+
+
+ # Usage
+ ```python
+ from datasets import load_dataset
+
+ dataset = load_dataset("mstz/spambase", "spambase")["train"]
+ ```
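Beyond the snippet in the card, here is a minimal sketch of what downstream use might look like, assuming the features declared in `spambase.py` below (numeric `word_freq_*`, `char_freq_*` and `capital_run_length_*` columns plus the `is_spam` label); the scikit-learn baseline is purely illustrative and not part of this commit:

```python
from datasets import load_dataset
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

dataset = load_dataset("mstz/spambase", "spambase")["train"]
df = dataset.to_pandas()  # one row per e-mail: numeric features plus the is_spam label

X = df.drop(columns=["is_spam"])
y = df["is_spam"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# A simple logistic-regression baseline on the word/char frequency features.
clf = LogisticRegression(max_iter=1000).fit(X_train, y_train)
print(f"held-out accuracy: {clf.score(X_test, y_test):.3f}")
```

Labels come back as `ClassLabel` integers (0 = "no", 1 = "yes"), so they can be fed to scikit-learn directly.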
spambase.data ADDED
The diff for this file is too large to render. See raw diff
 
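Since the raw diff is not rendered, a quick local inspection is one way to check what was uploaded; the sketch below assumes the file keeps the UCI layout (comma-separated, 57 numeric attributes plus a trailing 0/1 spam label, roughly 4601 rows), which this commit does not show directly:

```python
import pandas as pd

# Peek at the committed file; header handling is an assumption here,
# since the UCI distribution of spambase.data ships without a header row.
raw = pd.read_csv("spambase.data", header=None)
print(raw.shape)    # ~(4601, 58) for the UCI version: 57 features + label
print(raw.head())
```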
spambase.py ADDED
@@ -0,0 +1,127 @@
+ """Spambase: a spam e-mail classification dataset."""
+
+ from typing import List
+
+ import datasets
+
+ import pandas
+
+
+ VERSION = datasets.Version("1.0.0")
+
+ DESCRIPTION = "Spambase dataset from the UCI ML repository."
+ _HOMEPAGE = "https://archive.ics.uci.edu/ml/datasets/Spambase"
+ _URLS = ("https://archive.ics.uci.edu/ml/datasets/Spambase",)
+ _CITATION = """
+ @misc{misc_spambase_94,
+     author = {Hopkins, Mark and Reeber, Erik and Forman, George and Suermondt, Jaap},
+     title = {{Spambase}},
+     year = {1999},
+     howpublished = {UCI Machine Learning Repository},
+     note = {{DOI}: 10.24432/C53G6X}
+ }"""
+
+ # Dataset info
+ urls_per_split = {
+     "train": "https://huggingface.co/datasets/mstz/spambase/raw/main/spambase.data"
+ }
+ features_types_per_config = {
+     "spambase": {
+         "word_freq_make": datasets.Value("float64"),
+         "word_freq_address": datasets.Value("float64"),
+         "word_freq_all": datasets.Value("float64"),
+         "word_freq_3d": datasets.Value("float64"),
+         "word_freq_our": datasets.Value("float64"),
+         "word_freq_over": datasets.Value("float64"),
+         "word_freq_remove": datasets.Value("float64"),
+         "word_freq_internet": datasets.Value("float64"),
+         "word_freq_order": datasets.Value("float64"),
+         "word_freq_mail": datasets.Value("float64"),
+         "word_freq_receive": datasets.Value("float64"),
+         "word_freq_will": datasets.Value("float64"),
+         "word_freq_people": datasets.Value("float64"),
+         "word_freq_report": datasets.Value("float64"),
+         "word_freq_addresses": datasets.Value("float64"),
+         "word_freq_free": datasets.Value("float64"),
+         "word_freq_business": datasets.Value("float64"),
+         "word_freq_email": datasets.Value("float64"),
+         "word_freq_you": datasets.Value("float64"),
+         "word_freq_credit": datasets.Value("float64"),
+         "word_freq_your": datasets.Value("float64"),
+         "word_freq_font": datasets.Value("float64"),
+         "word_freq_000": datasets.Value("float64"),
+         "word_freq_money": datasets.Value("float64"),
+         "word_freq_hp": datasets.Value("float64"),
+         "word_freq_hpl": datasets.Value("float64"),
+         "word_freq_george": datasets.Value("float64"),
+         "word_freq_650": datasets.Value("float64"),
+         "word_freq_lab": datasets.Value("float64"),
+         "word_freq_labs": datasets.Value("float64"),
+         "word_freq_telnet": datasets.Value("float64"),
+         "word_freq_857": datasets.Value("float64"),
+         "word_freq_data": datasets.Value("float64"),
+         "word_freq_415": datasets.Value("float64"),
+         "word_freq_85": datasets.Value("float64"),
+         "word_freq_technology": datasets.Value("float64"),
+         "word_freq_1999": datasets.Value("float64"),
+         "word_freq_parts": datasets.Value("float64"),
+         "word_freq_pm": datasets.Value("float64"),
+         "word_freq_direct": datasets.Value("float64"),
+         "word_freq_cs": datasets.Value("float64"),
+         "word_freq_meeting": datasets.Value("float64"),
+         "word_freq_original": datasets.Value("float64"),
+         "word_freq_project": datasets.Value("float64"),
+         "word_freq_re": datasets.Value("float64"),
+         "word_freq_edu": datasets.Value("float64"),
+         "word_freq_table": datasets.Value("float64"),
+         "word_freq_conference": datasets.Value("float64"),
+         "char_freq_;": datasets.Value("float64"),
+         "char_freq_(": datasets.Value("float64"),
+         "char_freq_[": datasets.Value("float64"),
+         "char_freq_!": datasets.Value("float64"),
+         "char_freq_$": datasets.Value("float64"),
+         "char_freq_#": datasets.Value("float64"),
+         "capital_run_length_average": datasets.Value("float64"),
+         "capital_run_length_longest": datasets.Value("float64"),
+         "capital_run_length_total": datasets.Value("float64"),
+         "is_spam": datasets.ClassLabel(num_classes=2, names=("no", "yes"))
+     },
+
+ }
+ features_per_config = {k: datasets.Features(features_types_per_config[k]) for k in features_types_per_config}
+
+
+ class SpambaseConfig(datasets.BuilderConfig):
+     def __init__(self, **kwargs):
+         super(SpambaseConfig, self).__init__(version=VERSION, **kwargs)
+         self.features = features_per_config[kwargs["name"]]
+
+
+ class Spambase(datasets.GeneratorBasedBuilder):
+     # dataset configurations
+     DEFAULT_CONFIG_NAME = "spambase"
+     BUILDER_CONFIGS = [
+         SpambaseConfig(name="spambase",
+                        description="Spambase for binary classification.")
+     ]
+
+     def _info(self):
+         info = datasets.DatasetInfo(description=DESCRIPTION, citation=_CITATION, homepage=_HOMEPAGE,
+                                     features=features_per_config[self.config.name])
+
+         return info
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         downloads = dl_manager.download_and_extract(urls_per_split)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloads["train"]})
+         ]
+
+     def _generate_examples(self, filepath: str):
+         data = pandas.read_csv(filepath)  # column names are expected to match the declared features
+
+         for row_id, row in data.iterrows():
+             data_row = dict(row)
+
+             yield row_id, data_row
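One way to sanity-check a builder script like this before pushing is to point `load_dataset` at the local file and spot-check the declared features; the snippet below is a sketch of such a workflow, not part of the commit, and it assumes the script sits in the current working directory:

```python
from datasets import load_dataset

# Build from the local script; fetching spambase.data from the Hub in
# _split_generators still requires network access.
ds = load_dataset("./spambase.py", "spambase")["train"]

print(len(ds.features))         # 57 numeric attributes + the is_spam label = 58 columns
print(ds.features["is_spam"])   # the ClassLabel with names ("no", "yes") declared above
print(ds[0])                    # first generated example as a plain dict
```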