tanganke committed
Commit 3405c30
1 Parent(s): 5381095

Delete loading script

Files changed (1):
  1. kmnist.py +0 -122
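
With the loading script deleted, the dataset presumably loads directly from data files hosted in the repository (for example an auto-converted Parquet copy), so no custom code has to run on the user's side. A minimal usage sketch, assuming the repo id tanganke/kmnist (inferred from the committer name, not stated in this commit):

from datasets import load_dataset

# Assumption: after this deletion the Hub serves the dataset from data files,
# so load_dataset needs no custom builder script.
ds = load_dataset("tanganke/kmnist")  # repo id is an assumption
print(ds)              # expected: DatasetDict with "train" and "test" splits (per the deleted script below)
print(ds["train"][0])  # expected: {"image": <PIL image>, "label": <class index>}
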
kmnist.py DELETED
@@ -1,122 +0,0 @@
-import struct
-
-import numpy as np
-
-import datasets
-from datasets.tasks import ImageClassification
-
-_CITATION = R"""
-@article{DBLP:journals/corr/abs-1812-01718,
-  author     = {Tarin Clanuwat and
-                Mikel Bober{-}Irizar and
-                Asanobu Kitamoto and
-                Alex Lamb and
-                Kazuaki Yamamoto and
-                David Ha},
-  title      = {Deep Learning for Classical Japanese Literature},
-  journal    = {CoRR},
-  volume     = {abs/1812.01718},
-  year       = {2018},
-  url        = {http://arxiv.org/abs/1812.01718},
-  eprinttype = {arXiv},
-  eprint     = {1812.01718},
-  timestamp  = {Thu, 14 Oct 2021 09:15:14 +0200},
-  biburl     = {https://dblp.org/rec/journals/corr/abs-1812-01718.bib},
-  bibsource  = {dblp computer science bibliography, https://dblp.org}
-}
-"""
-
-_URL = "./raw/"
-_URLS = {
-    "train_images": "train-images-idx3-ubyte.gz",
-    "train_labels": "train-labels-idx1-ubyte.gz",
-    "test_images": "t10k-images-idx3-ubyte.gz",
-    "test_labels": "t10k-labels-idx1-ubyte.gz",
-}
-
-
-class KMNIST(datasets.GeneratorBasedBuilder):
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="kmnist",
-            version=datasets.Version("1.0.0"),
-        )
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            features=datasets.Features(
-                {
-                    "image": datasets.Image(),
-                    "label": datasets.features.ClassLabel(
-                        names=[
-                            "お",
-                            "き",
-                            "す",
-                            "つ",
-                            "な",
-                            "は",
-                            "ま",
-                            "や",
-                            "れ",
-                            "を",
-                        ]
-                    ),
-                }
-            ),
-            supervised_keys=("image", "label"),
-            homepage="https://github.com/rois-codh/kmnist",
-            citation=_CITATION,
-            task_templates=[
-                ImageClassification(
-                    image_column="image",
-                    label_column="label",
-                )
-            ],
-        )
-
-    def _split_generators(self, dl_manager):
-        urls_to_download = {key: _URL + fname for key, fname in _URLS.items()}
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": (
-                        downloaded_files["train_images"],
-                        downloaded_files["train_labels"],
-                    ),
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "filepath": (
-                        downloaded_files["test_images"],
-                        downloaded_files["test_labels"],
-                    ),
-                    "split": "test",
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath, split):
-        """This function returns the examples in the raw form."""
-        # Images
-        with open(filepath[0], "rb") as f:
-            # First 16 bytes contain some metadata
-            _ = f.read(4)
-            size = struct.unpack(">I", f.read(4))[0]
-            _ = f.read(8)
-            images = np.frombuffer(f.read(), dtype=np.uint8).reshape(size, 28, 28)
-
-        # Labels
-        with open(filepath[1], "rb") as f:
-            # First 8 bytes contain some metadata
-            _ = f.read(8)
-            labels = np.frombuffer(f.read(), dtype=np.uint8)
-
-        for idx in range(size):
-            yield idx, {"image": images[idx], "label": str(labels[idx])}
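
For reference, the deleted _generate_examples parses the classic IDX (ubyte) layout used by MNIST-style datasets: image files start with a 16-byte header of four big-endian uint32 values (magic number, image count, rows, columns), and label files with an 8-byte header (magic number, label count), each followed by raw uint8 data. A self-contained sketch of the same parsing for the decompressed raw files; file names follow _URLS above and the helper names are illustrative:

import struct

import numpy as np

def read_idx_images(path):
    # IDX3 header: magic, count, rows, cols -- four big-endian uint32s (16 bytes).
    with open(path, "rb") as f:
        _magic, count, rows, cols = struct.unpack(">IIII", f.read(16))
        return np.frombuffer(f.read(), dtype=np.uint8).reshape(count, rows, cols)

def read_idx_labels(path):
    # IDX1 header: magic, count -- two big-endian uint32s (8 bytes).
    with open(path, "rb") as f:
        _magic, _count = struct.unpack(">II", f.read(8))
        return np.frombuffer(f.read(), dtype=np.uint8)

images = read_idx_images("train-images-idx3-ubyte")  # (60000, 28, 28) for KMNIST train
labels = read_idx_labels("train-labels-idx1-ubyte")
assert len(images) == len(labels)

Note also that the script yields labels as strings of digit indices (str(labels[idx]) gives "0" through "9") even though the ClassLabel names are kana characters; datasets' ClassLabel falls back to interpreting a string not found in names as an integer index into names, which is why "3" resolves to "つ".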