europeana_newspapers/europeana_newspapers.py
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO"""
import os
import pyarrow as pa
import pyarrow.parquet as pq
from datasets import Value, Sequence
import datasets
from pathlib import Path

_DESCRIPTION = "Pages from the Europeana Newspapers collection with OCR text, OCR confidence statistics, and bounding boxes."
_HOMEPAGE = "TODO"

# Language codes for which configurations are generated.
_LANG_CONFIGS = {"fi", "sv"}

# Per-language mapping from decade to the parquet file holding its pages.
_DATA = {
    "sv": {"1900": "sv_1900.parquet", "1910": "sv_1910.parquet"},
    "fi": {"1900": "fi_1900.parquet", "1910": "fi_1910.parquet"},
}


class EuropeanaNewspapersConfig(datasets.BuilderConfig):
    """BuilderConfig for the Europeana Newspapers dataset."""

    def __init__(self, *args, languages=None, **kwargs):
        """BuilderConfig for the Europeana Newspapers dataset.

        Args:
            languages (:obj:`List[str]`): List of languages to load.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            *args,
            # The config name joins the requested language codes, e.g. "fi+sv".
            name="+".join(languages),
            **kwargs,
        )
        self.languages = languages
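
# For illustration (an addition, not part of the original script): the config
# name is built by joining the requested language codes with "+":
#
#     config = EuropeanaNewspapersConfig(languages=["fi", "sv"])
#     assert config.name == "fi+sv"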


class EuropeanaNewspapers(datasets.GeneratorBasedBuilder):
    """Europeana Newspapers dataset builder."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIG_CLASS = EuropeanaNewspapersConfig
    # One single-language configuration per supported language.
    BUILDER_CONFIGS = [
        EuropeanaNewspapersConfig(languages=[lang]) for lang in _LANG_CONFIGS
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": Value(dtype="string"),
                    "mean_ocr": Value(dtype="float64"),
                    "std_ocr": Value(dtype="float64"),
                    "bounding_boxes": Sequence(
                        feature=Sequence(feature=Value(dtype="float64"), length=-1),
                    ),
                    "title": Value(dtype="string"),
                    "date": Value(dtype="string"),
                    "language": Sequence(feature=Value(dtype="string")),
                    "item_iiif_url": Value(dtype="string"),
                    # "multi_language" exists in the parquet files but is dropped
                    # in _generate_examples, so it is not declared here.
                    "issue_uri": Value(dtype="string"),
                    "id": Value(dtype="string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license="Multiple: see the 'license' field of each sample.",
        )

    def _split_generators(self, dl_manager):
        languages = self.config.languages
        # Collect the per-decade parquet files for every requested language.
        data_files = []
        for language in languages:
            data_files.extend(_DATA[language].values())
        files = dl_manager.download(data_files)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": files},
            ),
        ]

    def _generate_examples(self, files):
        key = 0
        for file in files:
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                # Stream the parquet file in batches to bound memory usage.
                for record_batch in parquet_file.iter_batches(batch_size=10_000):
                    pa_table = pa.Table.from_batches([record_batch])
                    for row in pa_table.to_pylist():
                        # Drop the field that is not declared in the features schema.
                        row.pop("multi_language")
                        yield key, row
                        key += 1
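

# Minimal usage sketch (an addition, not part of the original script). When this
# script is hosted on the Hub, `languages` is forwarded to
# EuropeanaNewspapersConfig by `load_dataset`; the repository id below is
# hypothetical:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("europeana_newspapers", languages=["sv"], split="train")
#     print(ds[0]["title"], ds[0]["date"])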