"""EU Wikipedias"""
import json
import datasets
from huggingface_hub.file_download import hf_hub_url
try:
    import lzma as xz
except ImportError:
    import pylzma as xz
datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@ONLINE {wikidump,
author = {Wikimedia Foundation},
title = {Wikimedia Downloads},
url = {https://dumps.wikimedia.org}
}
"""
_DESCRIPTION = """\
Wikipedia dataset containing cleaned articles in the 24 official EU languages.
The dataset is built from the Wikipedia dumps
(https://dumps.wikimedia.org/) with one configuration per language (plus an
"all" configuration covering every language). Each example contains the content
of one full Wikipedia article with cleaning to strip markup and unwanted
sections (references, etc.).
"""
_LICENSE = (
    "This work is licensed under the Creative Commons Attribution-ShareAlike "
    "3.0 Unported License. To view a copy of this license, visit "
    "http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
    "Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
)
_URL = "https://huggingface.co/datasets/joelito/EU_Wikipedias"
_LANGUAGES = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr",
"hu", "it", "lt", "lv", "mt", "nl", "pl", "pt", "ro", "sk", "sl", "sv"]
_DATES = ["20221120"] # one can add more in the future with the file prepare_wikipedias.py
# IMPORTANT: Increase this once larger datasets are available (English has 11 in 20221120)
_HIGHEST_NUMBER_OF_SHARDS = 11
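# The shards prepared by prepare_wikipedias.py are expected to live on the Hub under
# data/{date}/{language}_{shard}.jsonl.xz (e.g. data/20221120/de_0.jsonl.xz).
# _split_generators below simply probes shard indices 0 .. _HIGHEST_NUMBER_OF_SHARDS - 1
# and stops at the first index that fails to download.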

class EUWikipediasConfig(datasets.BuilderConfig):
    """BuilderConfig for EUWikipedias."""

    def __init__(self, date=None, language=None, **kwargs):
        """BuilderConfig for EUWikipedias.

        Args:
            language: string, the language code of the Wikipedia dump to use:
                one of bg, cs, da, de, el, en, es, et, fi, fr, ga, hr, hu, it,
                lt, lv, mt, nl, pl, pt, ro, sk, sl, sv, or "all".
            date: string, date of the Wikipedia dump in YYYYMMDD format. A list of
                available dates can be found at https://dumps.wikimedia.org/enwiki/.
            **kwargs: keyword arguments forwarded to super.
        """
        if date not in _DATES:
            raise ValueError(f"date must be one of {_DATES} but was `{date}`")
        if language not in _LANGUAGES + ["all"]:
            raise ValueError(f"language must be one of {_LANGUAGES + ['all']} but was `{language}`")
        super().__init__(
            name=f"{date}.{language}",
            description=f"Wikipedia dataset for {language}, parsed from the {date} dump.",
            **kwargs,
        )
        self.date = date
        self.language = language
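
# Usage sketch (illustrative, not part of the loader): assuming this script backs the Hub
# dataset "joelito/EU_Wikipedias", configurations are named "{date}.{language}" as built
# above, so a single language (or all of them) should be loadable like this:
#
#   from datasets import load_dataset
#
#   wiki_de = load_dataset("joelito/EU_Wikipedias", "20221120.de", split="train")
#   # load_dataset forwards extra keyword arguments to EUWikipediasConfig:
#   wiki_all = load_dataset("joelito/EU_Wikipedias", date="20221120", language="all", split="train")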

class EUWikipedias(datasets.GeneratorBasedBuilder):
    """EUWikipedias: A dataset of Wikipedias in the EU languages."""

    BUILDER_CONFIG_CLASS = EUWikipediasConfig
    BUILDER_CONFIGS = [EUWikipediasConfig(date=date, language=language)
                       for language in _LANGUAGES + ["all"]
                       for date in _DATES]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "language": datasets.Value("string"),
                    "id": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,  # No default supervised_keys.
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        def download_url(dataset, file_name):
            url = hf_hub_url(repo_id=dataset, filename=f"data/{file_name}.jsonl.xz", repo_type="dataset")
            return dl_manager.download(url)

        data_infos = []
        languages = _LANGUAGES if self.config.language == "all" else [self.config.language]
        for language in languages:
            info = {"language": language}
            for shard in range(_HIGHEST_NUMBER_OF_SHARDS):
                try:
                    info["filepath"] = download_url("joelito/EU_Wikipedias", f"{self.config.date}/{language}_{shard}")
                    data_infos.append(info.copy())
                except Exception:  # the download fails once the shard index no longer exists
                    break  # we found the last shard
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"data_infos": data_infos})]

    def _generate_examples(self, data_infos):
        """Yields the examples in raw (text) form by iterating over all the downloaded files."""
        id_ = 0
        for data_info in data_infos:
            logger.info("Generating examples from = %s", data_info["filepath"])
            try:
                with xz.open(open(data_info["filepath"], "rb"), "rt", encoding="utf-8") as f:
                    for line in f:
                        if line:
                            example = json.loads(line)
                            if example is not None and isinstance(example, dict):
                                yield id_, {
                                    "language": data_info["language"],  # add the language
                                    **example,
                                }
                                id_ += 1
            except Exception:
                logger.exception("Error while processing file %s", data_info["filepath"])
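

if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the loader): fetch a single shard directly
    # and parse its first line the same way _generate_examples does. The filename assumes
    # the data/{date}/{language}_{shard}.jsonl.xz layout used in _split_generators; "mt"
    # (Maltese) is chosen only because it is one of the smaller Wikipedias.
    from huggingface_hub import hf_hub_download

    path = hf_hub_download(repo_id="joelito/EU_Wikipedias", repo_type="dataset",
                           filename="data/20221120/mt_0.jsonl.xz")
    with xz.open(open(path, "rb"), "rt", encoding="utf-8") as f:
        first_article = json.loads(next(f))
    print(sorted(first_article.keys()))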