davanstrien committed
Commit cc12d95
1 Parent(s): 03eec47

draft loading script

Files changed (1)
  1. europeana_newspapers.py +124 -0
europeana_newspapers.py ADDED
@@ -0,0 +1,124 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """TODO"""
+
+ import os
+
+ import pyarrow as pa
+ import pyarrow.parquet as pq
+ from datasets import Value, Sequence
+ import datasets
+ from pathlib import Path
+
+ _DESCRIPTION = "TODO"
+
+ _HOMEPAGE = "TODO"
+
+ _LANG_CONFIGS = {"fi", "sv"}
+
+
+ class EuropeanaNewspapersConfig(datasets.BuilderConfig):
+     """BuilderConfig for the Europeana Newspapers dataset."""
+
+     def __init__(self, *args, languages=None, **kwargs):
+         """BuilderConfig for the Europeana Newspapers dataset.
+
+         Args:
+             languages (:obj:`List[str]`): List of languages to load.
+             **kwargs: keyword arguments forwarded to super.
+         """
+
+         super().__init__(
+             *args,
+             name="+".join(languages),
+             **kwargs,
+         )
+         self.languages = languages
+
+
+ class EuropeanaNewspapers(datasets.GeneratorBasedBuilder):
+     """TODO."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIG_CLASS = EuropeanaNewspapersConfig
+     BUILDER_CONFIGS = [
+         EuropeanaNewspapersConfig(languages=[lang]) for lang in _LANG_CONFIGS
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "text": Value(dtype="string"),
+                     "mean_ocr": Value(dtype="float64"),
+                     "std_ocr": Value(dtype="float64"),
+                     "bounding_boxes": Sequence(
+                         feature=Sequence(
+                             feature=Value(dtype="float64", id=None),
+                             length=-1,
+                         ),
+                     ),
+                     "title": Value(dtype="string"),
+                     "date": Value(dtype="string"),
+                     "language": Sequence(
+                         feature=Value(dtype="string", id=None),
+                     ),
+                     "item_iiif_url": Value(
+                         dtype="string",
+                     ),
+                     # "multi_language": Value(dtype="bool"),
+                     "issue_uri": Value(dtype="string"),
+                     "id": Value(dtype="string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license="Multiple: see the 'license' field of each sample.",
+         )
+
+     def _split_generators(self, dl_manager):
+         parquet_files = list(Path(".").rglob("*.parquet"))
+         languages = self.config.languages
+         data_files = [
+             parquet_file
+             for parquet_file in parquet_files
+             if parquet_file.stem.split("_")[0] in languages
+         ]
+
+         files = dl_manager.download(data_files)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "files": files,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, files):
+         key = 0
+         for file in files:
+             with open(file, "rb") as f:
+                 parquet_file = pq.ParquetFile(f)
+                 for record_batch in parquet_file.iter_batches(batch_size=10_000):
+                     pa_table = pa.Table.from_batches([record_batch])
+                     rows = pa_table.to_pylist()
+                     for row in rows:
+
+                         row.pop("multi_language")
+                         yield key, row
+                         key += 1
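
A minimal local smoke test for the draft script could look roughly like the sketch below. It is not part of the commit: it assumes parquet files whose stems start with a language code (e.g. a hypothetical fi_part1.parquet) are reachable from the working directory, since _split_generators matches files via Path(".").rglob("*.parquet") and the stem prefix; the languages keyword is forwarded to EuropeanaNewspapersConfig, giving the config name "fi+sv".

    from datasets import load_dataset

    # Hypothetical smoke test: load the draft script directly from disk.
    # Assumes parquet files like "fi_part1.parquet" exist under the current
    # working directory so the language filter in _split_generators finds them.
    ds = load_dataset(
        "./europeana_newspapers.py",   # path to this loading script
        languages=["fi", "sv"],        # forwarded to EuropeanaNewspapersConfig
        split="train",
    )
    print(ds[0]["text"][:200], ds[0]["mean_ocr"])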