tanganke committed on
Commit
50aa98f
1 Parent(s): abe16fe

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,63 @@
+ ---
+ dataset_info:
+   config_name: emnist-letters
+   features:
+   - name: image
+     dtype: image
+   - name: label
+     dtype:
+       class_label:
+         names:
+           '0': A
+           '1': B
+           '2': C
+           '3': D
+           '4': E
+           '5': F
+           '6': G
+           '7': H
+           '8': I
+           '9': J
+           '10': K
+           '11': L
+           '12': M
+           '13': N
+           '14': O
+           '15': P
+           '16': Q
+           '17': R
+           '18': S
+           '19': T
+           '20': U
+           '21': V
+           '22': W
+           '23': X
+           '24': Y
+           '25': Z
+   splits:
+   - name: train
+     num_bytes: 52628800
+     num_examples: 124800
+   - name: test
+     num_bytes: 8775753
+     num_examples: 20800
+   download_size: 36381774
+   dataset_size: 61404553
+ ---
+
+ # Dataset Card for "emnist-letters"
+
+ ## Dataset Information
+
+ The `emnist-letters` dataset is a collection of 28x28 grayscale images of handwritten letters,
+ split into a training set and a test set.
+
+ ## Data Fields
+
+ - `image`: the image of a handwritten letter (feature type `image`).
+ - `label`: the class of the handwritten letter (feature type `class_label`), one of the 26 values 'A' through 'Z'.
+
+ ## Data Splits
+
+ - `train`: 124,800 examples, 52,628,800 bytes in total.
+ - `test`: 20,800 examples, 8,775,753 bytes in total.
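For reference, a minimal loading sketch with the `datasets` library follows. The repository id `tanganke/emnist-letters` is an assumption inferred from the committer and config name, not something stated in this commit; recent `datasets` releases may also require `trust_remote_code=True` for script-based datasets like this one.

```python
# Minimal usage sketch. Assumption: the dataset is hosted at "tanganke/emnist-letters";
# substitute the actual repository id if it differs.
from datasets import load_dataset

ds = load_dataset("tanganke/emnist-letters")  # may need trust_remote_code=True on recent versions

example = ds["train"][0]
print(example["image"].size)  # PIL image, (28, 28)

label_names = ds["train"].features["label"].names
print(label_names[example["label"]])  # e.g. 'A'
```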
emnist-letters-mapping.txt ADDED
@@ -0,0 +1,26 @@
+ 1 65 97
+ 2 66 98
+ 3 67 99
+ 4 68 100
+ 5 69 101
+ 6 70 102
+ 7 71 103
+ 8 72 104
+ 9 73 105
+ 10 74 106
+ 11 75 107
+ 12 76 108
+ 13 77 109
+ 14 78 110
+ 15 79 111
+ 16 80 112
+ 17 81 113
+ 18 82 114
+ 19 83 115
+ 20 84 116
+ 21 85 117
+ 22 86 118
+ 23 87 119
+ 24 88 120
+ 25 89 121
+ 26 90 122
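Each row of the mapping file gives a 1-based EMNIST label followed by the ASCII codes of its uppercase and lowercase characters. A small parsing sketch, using the file name added in this commit:

```python
# Parse emnist-letters-mapping.txt: rows are "<label> <uppercase ASCII> <lowercase ASCII>".
mapping = {}
with open("emnist-letters-mapping.txt") as f:
    for line in f:
        label, upper, lower = (int(field) for field in line.split())
        mapping[label] = (chr(upper), chr(lower))

print(mapping[1])   # ('A', 'a')
print(mapping[26])  # ('Z', 'z')
```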
emnist_letters.py ADDED
@@ -0,0 +1,89 @@
+ import struct
+
+ import numpy as np
+
+ import datasets
+ from datasets.tasks import ImageClassification
+
+
+ _URL = "./raw/"
+ _URLS = {
+     "train_images": "emnist-letters-train-images-idx3-ubyte.gz",
+     "train_labels": "emnist-letters-train-labels-idx1-ubyte.gz",
+     "test_images": "emnist-letters-test-images-idx3-ubyte.gz",
+     "test_labels": "emnist-letters-test-labels-idx1-ubyte.gz",
+ }
+
+
+ class EMNIST(datasets.GeneratorBasedBuilder):
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="emnist-letters",
+             version=datasets.Version("1.0.0"),
+         )
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             features=datasets.Features(
+                 {
+                     "image": datasets.Image(),
+                     "label": datasets.features.ClassLabel(
+                         names=[chr(i) for i in range(65, 91)]  # class names 'A'..'Z'
+                     ),
+                 }
+             ),
+             supervised_keys=("image", "label"),
+             task_templates=[
+                 ImageClassification(
+                     image_column="image",
+                     label_column="label",
+                 )
+             ],
+         )
+
+     def _split_generators(self, dl_manager):
+         urls_to_download = {key: _URL + fname for key, fname in _URLS.items()}
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": (
+                         downloaded_files["train_images"],
+                         downloaded_files["train_labels"],
+                     ),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": (
+                         downloaded_files["test_images"],
+                         downloaded_files["test_labels"],
+                     ),
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split):
+         """Yield examples from the raw IDX image and label files."""
+         # Images: filepath[0] is an idx3-ubyte file with a 16-byte header.
+         with open(filepath[0], "rb") as f:
+             # Header: magic number, image count, rows, columns (big-endian uint32 each).
+             _ = f.read(4)  # magic number
+             size = struct.unpack(">I", f.read(4))[0]  # number of images
+             _ = f.read(8)  # rows and columns (28 x 28)
+             images = np.frombuffer(f.read(), dtype=np.uint8).reshape(size, 28, 28)
+
+         # Labels: filepath[1] is an idx1-ubyte file with an 8-byte header.
+         with open(filepath[1], "rb") as f:
+             _ = f.read(8)  # magic number and label count
+             # EMNIST Letters labels are 1..26; shift to 0..25 to match the ClassLabel order.
+             labels = np.frombuffer(f.read(), dtype=np.uint8) - 1
+
+         for idx in range(size):
+             yield idx, {"image": images[idx], "label": str(labels[idx])}
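The header parsing in `_generate_examples` can also be checked outside the builder. Below is a standalone sketch that reads the raw gzip-compressed IDX files directly (the builder itself receives already-decompressed paths from `download_and_extract`); the expected example count comes from the dataset card above.

```python
# Standalone sanity check mirroring _generate_examples: read the IDX headers of the
# raw test files directly. Expected sizes come from the README metadata (20800 examples).
import gzip
import struct

import numpy as np

with gzip.open("raw/emnist-letters-test-images-idx3-ubyte.gz", "rb") as f:
    _magic, size, rows, cols = struct.unpack(">IIII", f.read(16))
    images = np.frombuffer(f.read(), dtype=np.uint8).reshape(size, rows, cols)

with gzip.open("raw/emnist-letters-test-labels-idx1-ubyte.gz", "rb") as f:
    _magic, n_labels = struct.unpack(">II", f.read(8))
    labels = np.frombuffer(f.read(), dtype=np.uint8) - 1  # shift 1..26 -> 0..25

assert size == n_labels == 20800
assert (rows, cols) == (28, 28)
assert labels.min() == 0 and labels.max() == 25
```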
raw/emnist-letters-test-images-idx3-ubyte.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:201c0a59fa093429287ae63a69d551fc68e07848e076d7c75b14e99d21985c4f
+ size 5105171
raw/emnist-letters-test-labels-idx1-ubyte.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c27a54187ec374e126764d8a67d8db4584829e07dcec40ed9da015112fb3866
+ size 144
raw/emnist-letters-train-images-idx3-ubyte.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58aec6425acf8636ad64ecec86a1514a0519e08d10fa7054d387862b44845d6c
+ size 31197176
raw/emnist-letters-train-labels-idx1-ubyte.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2e54aeb01b0583309766385d163418cdcefafa9a5f76dfa4953b41b52cbe40e
+ size 79283
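The four files under `raw/` are stored with Git LFS, so only pointer files appear in the diff (spec version, `oid sha256:...`, and `size` in bytes). A hedged sketch for verifying a locally fetched binary against its pointer, using the values recorded above for the test-images file and assuming the real object was pulled with `git lfs pull`:

```python
# Verify a Git LFS object fetched locally against the oid and size recorded in
# its pointer file as shown in this diff.
import hashlib
import os

path = "raw/emnist-letters-test-images-idx3-ubyte.gz"
expected_oid = "201c0a59fa093429287ae63a69d551fc68e07848e076d7c75b14e99d21985c4f"
expected_size = 5105171

assert os.path.getsize(path) == expected_size
with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == expected_oid
print("OK: file matches its LFS pointer")
```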