Commit c7a59fa by system (HF staff)
Parent: 889eaa7

import from S3

Files changed (1): mnist-text-small.py (+174, -0)
mnist-text-small.py ADDED
"""Compressed MNIST text dataset."""

from __future__ import absolute_import, division, print_function

import json
import os
import math

import numpy as np
import datasets


_DESCRIPTION = """\
MNIST dataset adapted to a text-based representation.

*Modified images to be ~1/4 the original area.*
Done by taking a max pool.

This allows testing interpolation quality for Transformer-VAEs.

System is heavily inspired by Matthew Rayfield's work https://youtu.be/Z9K3cwSL6uM

Works by quantising each MNIST pixel into one of 64 characters.
Every sample has an up & down version to encourage the model to learn rotation invariant features.

Use the `.array_to_text(` and `.text_to_array(` methods to test your generated data.

Data format:
- text: (16 x 14 tokens, 224 tokens total): Textual representation of an MNIST digit, for example:
```
00 down ! ! ! ! ! ! ! ! ! ! ! ! ! !
01 down ! ! ! ! ! ! ! ! ! ! ! ! ! !
02 down ! ! ! ! ! ! % % C L a ^ ! !
03 down ! ! ! - ` ` ` ` ` Y ` Q ! !
04 down ! ! ! % ` ` ` R ^ ! ! ! ! !
05 down ! ! ! ! $ G ` ! ! ! ! ! ! !
06 down ! ! ! ! ! # ` Y < ! ! ! ! !
07 down ! ! ! ! ! ! 5 ` ` F ! ! ! !
08 down ! ! ! ! ! ! ! % ` ` 1 ! ! !
09 down ! ! ! ! ! ! F ` ` ` ! ! ! !
10 down ! ! ! ! 1 ` ` ` ` 4 ! ! ! !
11 down ! ! L ` ` ` ` 5 ! ! ! ! ! !
12 down ! ! ` ` V B ! ! ! ! ! ! ! !
13 down ! ! ! ! ! ! ! ! ! ! ! ! ! !
```
- label: Just a number giving the digit class the text represents.

"""

_CITATION = """\
@dataset{dataset,
    author = {Fraser Greenlee},
    year = {2021},
    month = {1},
    pages = {},
    title = {MNIST small text dataset.},
    doi = {}
}
"""

_TRAIN_DOWNLOAD_URL = "https://raw.githubusercontent.com/Fraser-Greenlee/my-huggingface-datasets/master/data/mnist-text-small/train.json.zip"
_TEST_DOWNLOAD_URL = "https://raw.githubusercontent.com/Fraser-Greenlee/my-huggingface-datasets/master/data/mnist-text-small/test.json"

LABELS = list(range(10))
CUSTOM_METHODS = ['array_to_text', 'text_to_array']
# (tokens per line, number of lines); the first 2 tokens of each line are the
# row index and the 'down'/'up' orientation marker, leaving 14 pixel tokens.
IMG_SIZE = (16, 14)


class MnistTextSmall(datasets.GeneratorBasedBuilder):
    """MNIST represented by text."""

    def as_dataset(self, *args, **kwargs):
        f"""
        Return a Dataset for the specified split.

        Modified to add custom methods {CUSTOM_METHODS} to the dataset.
        This allows rendering the text as images & vice versa.
        """
        a_dataset = super().as_dataset(*args, **kwargs)
        for method in CUSTOM_METHODS:
            setattr(a_dataset, f'custom_{method}', getattr(self, method))
        return a_dataset

    @staticmethod
    def array_to_text(pixels: np.ndarray):
        '''
        Takes a 2D array of pixel brightnesses (values in [0, 1]) and converts it to text.
        Uses 64 tokens to represent all brightness values.
        Returns the 'down' version and the row-reversed 'up' version of the text.
        '''
        height, width = pixels.shape  # rows, columns

        lines = []

        for y in range(height):
            # Each line starts with the zero-padded row index and an orientation marker.
            split = ['%02d down' % y]

            for x in range(width):
                brightness = pixels[y, x]

                # Quantise the brightness and map it onto printable ASCII starting at '!'.
                quantised = math.floor(brightness * 64)
                split.append(chr(quantised + 33))

            lines.append(' '.join(split))

        # The 'up' variant lists the rows in reverse order with an 'up' marker.
        reversed_lines = []
        for line in lines:
            reversed_lines.insert(0, line.replace(' down ', ' up ', 1))

        return ['\n'.join(lines), '\n'.join(reversed_lines)]

    @staticmethod
    def text_to_array(text: str):
        '''
        Takes a text sequence and tries to convert it into a 2D numpy array of brightnesses.
        Parts of the text that don't match the format are skipped.
        '''
        lines = text.split('\n')
        pixels = np.zeros((IMG_SIZE[1], IMG_SIZE[0] - 2))

        tokens = None
        for y, line in enumerate(lines):
            if y >= pixels.shape[0]:
                # Ignore any extra lines beyond the expected image height.
                break
            tokens = line.split(' ')
            # Skip the row-index and orientation tokens, then decode each pixel token.
            for i in range(2, min(IMG_SIZE[0], len(tokens))):
                token = tokens[i]
                if len(token) == 1:
                    tkn_v = ord(token) - 33
                    if 0 <= tkn_v <= 64:
                        pixels[y, i - 2] = tkn_v / 64

        if not lines:
            return pixels

        # If the sample was the 'up' variant, flip the rows back into 'down' order.
        if tokens and len(tokens) > 1 and tokens[1] == 'up':
            pixels = pixels[::-1]

        return pixels

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    'label': datasets.features.ClassLabel(names=LABELS),
                    'text': datasets.Value("string"),
                }
            ),
            homepage="https://github.com/Fraser-Greenlee/my-huggingface-datasets",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
        test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": os.path.join(train_path, 'train.json')}
            ),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
        ]

    def _generate_examples(self, filepath):
        """Generate examples from a JSON-lines file."""
        with open(filepath, encoding="utf-8") as json_lines_file:
            data = []
            for line in json_lines_file:
                data.append(json.loads(line))

        for id_, row in enumerate(data):
            yield id_, row
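
For context, a minimal usage sketch (not part of the commit) is shown below. It assumes the script above is saved locally as `mnist-text-small.py`, that a `datasets` version which still supports loading local builder scripts is installed, and that `load_dataset` goes through the overridden `as_dataset`, so the `custom_*` converters end up attached to the returned split.

```python
import datasets

# Load the builder script straight from disk; this downloads the test JSON
# listed in _TEST_DOWNLOAD_URL and builds the "test" split.
test_set = datasets.load_dataset("./mnist-text-small.py", split="test")

sample = test_set[0]
print(sample["label"])                # class id, 0-9
print(sample["text"].split("\n")[0])  # first row of the textual digit

# as_dataset() attaches the converters as `custom_*` attributes, so the text
# can be turned back into a 14x14 array of brightnesses in [0, 1] ...
pixels = test_set.custom_text_to_array(sample["text"])
print(pixels.shape)  # (14, 14)

# ... and an array can be rendered back into its 'down' and 'up' text variants.
down_text, up_text = test_set.custom_array_to_text(pixels)
assert test_set.custom_text_to_array(down_text).shape == pixels.shape
```

Attaching the converters in `as_dataset` rather than distributing them separately keeps the round-trip helpers next to the data they describe; the trade-off is that they are only available when the dataset is built through this script.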