davanstrien committed
Commit c39e419
1 Parent(s): e1d8950

draft loading script

Files changed (1)
  1. early_printed_books_font_detection.py +125 -0
early_printed_books_font_detection.py ADDED
@@ -0,0 +1,125 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Dataset of pages from early printed books annotated with the font groups used in their text."""
+
+ import os
+ from pathlib import Path
+
+ import datasets
+ import requests
+ from PIL import Image
+
+ _CITATION = """\
+ @dataset{seuret_mathias_2019_3366686,
+   author       = {Seuret, Mathias and
+                   Limbach, Saskia and
+                   Weichselbaumer, Nikolaus and
+                   Maier, Andreas and
+                   Christlein, Vincent},
+   title        = {{Dataset of Pages from Early Printed Books with
+                    Multiple Font Groups}},
+   month        = aug,
+   year         = 2019,
+   publisher    = {Zenodo},
+   version      = {1.0.0},
+   doi          = {10.5281/zenodo.3366686},
+   url          = {https://doi.org/10.5281/zenodo.3366686}
+ }
+ """
+
+ _DESCRIPTION = """\
+ This dataset is composed of photos, at various resolutions, of 35,623 pages of printed books dating from the 15th to the 18th century. Each page has been assigned by experts between one and five labels corresponding to the font groups used in its text, with two extra classes for non-textual content and for fonts not in the following list: Antiqua, Bastarda, Fraktur, Gotico Antiqua, Greek, Hebrew, Italic, Rotunda, Schwabacher, and Textura.
+ """
+
+ _HOMEPAGE = "https://doi.org/10.5281/zenodo.3366686"
+ _LICENSE = "Creative Commons Attribution Non Commercial Share Alike 4.0 International"
+
+ ZENODO_RECORD_ID = 3366686
+ ZENODO_API_URL = f"https://zenodo.org/api/records/{ZENODO_RECORD_ID}"
+
+
+ class EarlyBookFonts(datasets.GeneratorBasedBuilder):
+     """Early printed book font detection dataset."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "image": datasets.Image(),
+                 "labels": datasets.Sequence(
+                     datasets.ClassLabel(
+                         names=[
+                             "greek",
+                             "antiqua",
+                             "other_font",
+                             "not_a_font",
+                             "italic",
+                             "rotunda",
+                             "textura",
+                             "fraktur",
+                             "schwabacher",
+                             "hebrew",
+                             "bastarda",
+                             "gotico_antiqua",
+                         ]
+                     )
+                 ),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Query the Zenodo record for the download links of the zip archives.
+         zenodo_record = requests.get(ZENODO_API_URL).json()
+         urls = sorted(
+             file["links"]["self"]
+             for file in zenodo_record["files"]
+             if file["type"] == "zip"
+         )
+         # The alphabetically last zip holds the label CSVs; the remaining zips hold the page images.
+         *image_urls, label_url = urls
+         labels = dl_manager.download_and_extract(label_url)
+         images = dl_manager.download_and_extract(image_urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"images": images, "labels": labels, "split": "training"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"images": images, "labels": labels, "split": "test"},
+             ),
+         ]
+
+     def _generate_examples(self, images, labels, split):
+         # Map "<parent_dir>/<filename>" (as referenced in the label CSVs) to the extracted file path.
+         mapping = {}
+         for directory in images:
+             for file in Path(directory).rglob("*"):
+                 if file.is_file():
+                     mapping["/".join(file.parts[-2:])] = file
+         # The label CSVs (labels-training.csv / labels-test.csv) sit in the extracted labels archive.
+         with open(os.path.join(labels, f"labels-{split}.csv"), "r") as label_csv:
+             for id_, row in enumerate(label_csv):
+                 filename, *row_labels = row.split(",")
+                 # "-" marks an unused label slot, so drop it after stripping whitespace.
+                 row_labels = [label.strip() for label in row_labels]
+                 row_labels = [label for label in row_labels if label != "-"]
+                 image = Image.open(mapping[filename])
+                 yield id_, {"image": image, "labels": row_labels}
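
A minimal usage sketch for trying the draft locally, assuming the script file sits in the current working directory (newer releases of the datasets library may also require trust_remote_code=True for script-based datasets):

from datasets import load_dataset

# Build both splits from the Zenodo record and load the training split.
ds = load_dataset("early_printed_books_font_detection.py", split="train")

example = ds[0]
print(example["labels"])                     # integer class ids for the font groups on this page
print(ds.features["labels"].feature.names)   # map ids back to font group names
example["image"]                             # PIL image of the scanned page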