davanstrien (HF staff) committed on
Commit
7da8fe2
1 Parent(s): f1f85de

upload dataset script

Files changed (1)
  1. illustrated_ads.py +141 -0
illustrated_ads.py ADDED
@@ -0,0 +1,141 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Dataset of illustrated and non-illustrated 19th Century newspaper ads."""
+
+ import ast
+ import pandas as pd
+ import datasets
+ from PIL import Image
+ from pathlib import Path
+
+ # Citation for the Zenodo record that hosts the images, labels and metadata.
+ # https://doi.org/10.5281/zenodo.5838410
+ _CITATION = """\
+ @dataset{van_strien_daniel_2021_5838410,
+   author    = {van Strien, Daniel},
+   title     = {{19th Century United States Newspaper Advert images
+                 with 'illustrated' or 'non illustrated' labels}},
+   month     = oct,
+   year      = 2021,
+   publisher = {Zenodo},
+   version   = {0.0.1},
+   doi       = {10.5281/zenodo.5838410},
+   url       = {https://doi.org/10.5281/zenodo.5838410}}
+ """
+
+
+ _DESCRIPTION = """\
+ The dataset contains images of 19th Century newspaper adverts labelled as 'illustrated' or 'non illustrated'. The images are derived from the Newspaper Navigator (news-navigator.labs.loc.gov/), a dataset of images drawn from the Library of Congress Chronicling America collection.
+ """
+
+ _HOMEPAGE = "https://doi.org/10.5281/zenodo.5838410"
+
+ _LICENSE = "Public Domain"
+
+
+ _URLS = "https://zenodo.org/record/5838410/files/images.zip?download=1"
+
+
+ # The builder class name matches the script name (illustrated_ads.py) in CamelCase.
+ class IllustratedAds(datasets.GeneratorBasedBuilder):
+     """19th Century US newspaper advert images labelled 'illustrated' or 'non illustrated'."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     def _info(self):
+
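+         # The features combine the advert image and its label with the per-advert
+         # metadata carried over from the source Newspaper Navigator data.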
+         features = datasets.Features(
+             {
+                 "file": datasets.Value("string"),
+                 "image": datasets.Image(),
+                 "label": datasets.ClassLabel(names=["text-only", "illustrations"]),
+                 "pub_date": datasets.Value("timestamp[ns]"),
+                 "page_seq_num": datasets.Value("int64"),
+                 "edition_seq_num": datasets.Value("int64"),
+                 "batch": datasets.Value("string"),
+                 "lccn": datasets.Value("string"),
+                 "box": datasets.Sequence(datasets.Value("float32")),
+                 "score": datasets.Value("float64"),
+                 "ocr": datasets.Value("string"),
+                 "place_of_publication": datasets.Value("string"),
+                 "geographic_coverage": datasets.Value("string"),
+                 "name": datasets.Value("string"),
+                 "publisher": datasets.Value("string"),
+                 "url": datasets.Value("string"),
+                 "page_url": datasets.Value("string"),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types.
+             features=features,
+             # If there's a common (input, target) tuple from the features, uncomment the
+             # supervised_keys line below. It is used if as_supervised=True in builder.as_dataset.
+             # supervised_keys=("image", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
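+         # The images ship as a single zip on Zenodo; every example goes into one TRAIN split.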
+         data_dir = dl_manager.download_and_extract(_URLS)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "data_dir": Path(data_dir),
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, data_dir):
+         dtypes = {
+             "page_seq_num": "int64",
+             "edition_seq_num": "int64",
+             "batch": "string",
+             "lccn": "string",
+             "score": "float64",
+             "place_of_publication": "string",
+             "name": "string",
+             "publisher": "string",
+             "url": "string",
+             "page_url": "string",
+         }
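+         # Labels (ads.csv) and Newspaper Navigator metadata (sample.csv) are read directly from Zenodo.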
+         df_labels = pd.read_csv(
+             "https://zenodo.org/record/5838410/files/ads.csv?download=1", index_col=0
+         )
+         df_metadata = pd.read_csv(
+             "https://zenodo.org/record/5838410/files/sample.csv?download=1",
+             index_col=0,
+             dtype=dtypes,
+         )
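+         # Derive the image filename ('/' in the Newspaper Navigator filepath becomes '_')
+         # and use it as the key for joining the labels with the metadata.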
+         df_metadata["file"] = df_metadata.filepath.str.replace("/", "_")
+         df_metadata = df_metadata.set_index("file", drop=True)
+         df = df_labels.join(df_metadata)
+         df = df.reset_index()
+         data = df.to_dict(orient="records")
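+         # 'box' and 'ocr' are stored as stringified Python lists in the CSV, so parse them before yielding.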
+         for id_, row in enumerate(data):
+             box = ast.literal_eval(row["box"])
+             row["box"] = box
+             row.pop("filepath")
+             ocr = " ".join(ast.literal_eval(row["ocr"]))
+             row["ocr"] = ocr
+             image = row["file"]
+             row["image"] = Image.open(data_dir / image)
+             yield id_, row
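
With this script in place, the dataset can be loaded through the `datasets` library. A minimal usage sketch, assuming the script file is available locally and a `datasets` release that still supports dataset loading scripts; the printed fields follow the features defined in `_info`:

from datasets import load_dataset

ds = load_dataset("illustrated_ads.py", split="train")
example = ds[0]
print(ds.features["label"].int2str(example["label"]))  # 'text-only' or 'illustrations'
print(example["ocr"][:80])                              # OCR tokens joined into a single string
print(example["image"].size)                            # PIL image of the advert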