File size: 4,972 Bytes
c39e419
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50a59a9
c39e419
 
 
 
 
 
50a59a9
 
 
 
c39e419
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50a59a9
c39e419
 
 
 
 
50a59a9
c39e419
 
 
50a59a9
c39e419
 
 
 
 
 
 
 
50a59a9
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset of illustrated and non illustrated 19th Century newspaper ads."""

import zipfile
from pathlib import Path

import datasets
import requests
from PIL import Image

# Some of the images are large so we set this to avoid Decompression bomb warnings

Image.MAX_IMAGE_PIXELS = None

# BibTeX citation for the Zenodo record this loader downloads from.
_CITATION = """\
@dataset{seuret_mathias_2019_3366686,
  author       = {Seuret, Mathias and
                  Limbach, Saskia and
                  Weichselbaumer, Nikolaus and
                  Maier, Andreas and
                  Christlein, Vincent},
  title        = {{Dataset of Pages from Early Printed Books with 
                   Multiple Font Groups}},
  month        = aug,
  year         = 2019,
  publisher    = {Zenodo},
  version      = {1.0.0},
  doi          = {10.5281/zenodo.3366686},
  url          = {https://doi.org/10.5281/zenodo.3366686}
}
"""

# Human-readable summary shown in the dataset card / DatasetInfo.
_DESCRIPTION = """\
This dataset is composed of photos of various resolution of 35'623 pages of printed books dating from the 15th to the 18th century. Each page has been attributed by experts from one to five labels corresponding to the font groups used in the text, with two extra-classes for non-textual content and fonts not present in the following list:  Antiqua, Bastarda, Fraktur, Gotico Antiqua, Greek, Hebrew, Italic, Rotunda, Schwabacher, and Textura.
"""

_HOMEPAGE = "https://doi.org/10.5281/zenodo.3366686"
_LICENSE = "Creative Commons Attribution Non Commercial Share Alike 4.0 International"

# Zenodo record id for the dataset; the API URL below returns the record's
# JSON metadata, including the list of downloadable files.
# NOTE(review): "ZENDO" looks like a typo for "ZENODO" — kept as-is because
# renaming a module-level constant could break callers that reference it.
ZENDO_REPO_ID = 3366686
ZENODO_API_URL = f"https://zenodo.org/api/records/{ZENDO_REPO_ID}"


class EarlyBookFonts(datasets.GeneratorBasedBuilder):
    """Early printed book fonts detection dataset.

    Each example is one page image together with the list of font-group
    labels experts assigned to that page. The archives are fetched from
    the Zenodo record described by ``ZENODO_API_URL``.
    """

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return the DatasetInfo: an image feature plus a sequence of
        font-group class labels (multi-label, one to five per page)."""
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "labels": datasets.Sequence(
                    datasets.ClassLabel(
                        names=[
                            "greek",
                            "antiqua",
                            "other_font",
                            "not_a_font",
                            "italic",
                            "rotunda",
                            "textura",
                            "fraktur",
                            "schwabacher",
                            "hebrew",
                            "bastarda",
                            "gotico_antiqua",
                        ]
                    )
                ),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Query the Zenodo record for its zip files and set up train/test splits.

        The zip URLs are sorted alphabetically and the last one is taken to be
        the labels archive, the rest image archives.
        NOTE(review): this relies on Zenodo file naming putting the labels zip
        last — verify if the record's file list ever changes.
        """
        # Without a timeout a stalled connection would block dataset
        # preparation forever; fail loudly on a bad HTTP status instead of
        # surfacing later as a cryptic KeyError on the JSON.
        response = requests.get(ZENODO_API_URL, timeout=60)
        response.raise_for_status()
        zenodo_record = response.json()
        urls = sorted(
            file["links"]["self"]
            for file in zenodo_record["files"]
            if file["type"] == "zip"
        )
        *image_urls, label_url = urls
        # The labels zip is read directly via zipfile in _generate_examples,
        # so it is downloaded but deliberately not extracted.
        labels = dl_manager.download(label_url)
        images = dl_manager.download_and_extract(image_urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"images": images, "labels": labels, "split": "training"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"images": images, "labels": labels, "split": "test"},
            ),
        ]

    def _generate_examples(self, images, labels, split):
        """Yield ``(id, example)`` pairs for one split.

        Args:
            images: directories containing the extracted image archives.
            labels: path to the (still zipped) labels archive holding
                ``labels-{split}.csv`` with rows ``filename,label,...``
                where ``-`` marks an unused label slot.
            split: ``"training"`` or ``"test"`` — selects the csv in the zip.
        """
        # Map "parent_dir/name" (the form used in the csv) to the on-disk path.
        mapping = {}
        for directory in images:
            for file in Path(directory).rglob("*"):
                # rglob also yields directories; only files belong in the map.
                if file.is_file():
                    mapping["/".join(file.parts[-2:])] = file
        with zipfile.ZipFile(labels) as labelzip:
            with labelzip.open(f"labels-{split}.csv") as label_csv:
                for id_, row in enumerate(label_csv.readlines()):
                    row = row.decode("utf-8")
                    # Don't shadow the `labels` parameter with the row labels.
                    filename, *row_labels = row.split(",")
                    # Strip \r as well as \n so CRLF line endings can't leak
                    # into label strings; drop "-" placeholders for empty slots.
                    row_labels = [label.strip("\r\n") for label in row_labels]
                    row_labels = [label for label in row_labels if label != "-"]
                    filename = mapping[filename]

                    image = Image.open(filename)
                    yield id_, {"image": image, "labels": row_labels}