A-MNIST / A-MNIST.py
"""Augmented MNIST Data Set"""
import struct
import numpy as np
import datasets
from datasets.tasks import ImageClassification

_DESCRIPTION = """\
The dataset is built on top of MNIST.
It consists of 130K images in 10 classes: 120K training and 10K test samples.
The training set was augmented with an additional 60K images.
"""

_URLS = {
    "train_images": "data/train-images-idx3-ubyte.gz",
    "train_labels": "data/train-labels-idx1-ubyte.gz",
    "test_images": "data/t10k-images-idx3-ubyte.gz",
    "test_labels": "data/t10k-labels-idx1-ubyte.gz",
}
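

# The four data files follow the standard MNIST IDX binary layout: a big-endian
# header (magic number, item count and, for the image files, row and column
# counts) followed by the raw uint8 payload. `dl_manager.download_and_extract`
# decompresses the .gz archives, so `_generate_examples` can read the extracted
# files with a plain `open()`.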
class AMNIST(datasets.GeneratorBasedBuilder):
    """A-MNIST Data Set"""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="amnist",
            version=datasets.Version("1.1.0"),
            description=_DESCRIPTION,
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "label": datasets.features.ClassLabel(names=["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]),
                }
            ),
            supervised_keys=("image", "label"),
            task_templates=[
                ImageClassification(
                    image_column="image",
                    label_column="label",
                )
            ],
        )

    def _split_generators(self, dl_manager):
        urls_to_download = _URLS
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": [downloaded_files["train_images"],
                                 downloaded_files["train_labels"]],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": [downloaded_files["test_images"],
                                 downloaded_files["test_labels"]],
                    "split": "test",
                },
            ),
        ]
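
    # Each `gen_kwargs` dict is forwarded to `_generate_examples` as keyword
    # arguments, so every split hands over its [images, labels] pair of
    # extracted file paths together with the split name.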
    def _generate_examples(self, filepath, split):
        """Yields the examples in raw form."""
        # Images: the IDX header is 16 bytes - magic number, image count,
        # row count and column count, each a big-endian uint32.
        with open(filepath[0], "rb") as f:
            _ = f.read(4)  # skip the magic number
            size = struct.unpack(">I", f.read(4))[0]  # number of images
            _ = f.read(8)  # skip the row and column counts (28 x 28)
            images = np.frombuffer(f.read(), dtype=np.uint8).reshape(size, 28, 28)
        # Labels: the IDX header is 8 bytes - magic number and label count.
        with open(filepath[1], "rb") as f:
            _ = f.read(8)
            labels = np.frombuffer(f.read(), dtype=np.uint8)
        for idx in range(size):
            yield idx, {"image": images[idx], "label": str(labels[idx])}
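

# Minimal usage sketch, not part of the loader itself. The repository id
# "gorar/A-MNIST" is an assumption taken from the hosting page, and recent
# versions of `datasets` may require `trust_remote_code=True` for script-based
# datasets; adjust both to your setup.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("gorar/A-MNIST")  # or load_dataset("./A-MNIST.py") for a local copy
    print(ds)  # DatasetDict with 120K train and 10K test examples
    example = ds["train"][0]
    print(example["label"], example["image"].size)  # class index and a 28x28 PIL image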