"""Wikipedia dataset containing cleaned articles of all languages.""" |
|
|
|
import datetime |
|
import json |
|
|
|
import mwparserfromhtml |
|
|
|
import datasets |
|
|
|
|
|
logger = datasets.logging.get_logger(__name__) |
|
|
|
|
|
|
|


_HOMEPAGE = "https://enterprise.wikimedia.com/docs/snapshot/"

_CITATION = """\
@ONLINE {wikidump,
    author = {Wikimedia Enterprise},
    title = {Wikimedia Snapshot API},
    url = {https://enterprise.wikimedia.com/docs/snapshot/}
}
"""

_DESCRIPTION = """\
Wikipedia dataset containing cleaned articles for all languages.
The datasets are built from the Wikimedia Enterprise HTML Snapshot API
(https://enterprise.wikimedia.com/docs/snapshot/) with one split per language. Each example
contains the content of one full Wikipedia article, cleaned to strip
markup and unwanted sections (references, etc.).
"""

_LICENSE = (
    "This work is licensed under the Creative Commons Attribution-ShareAlike "
    "3.0 Unported License. To view a copy of this license, visit "
    "http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
    "Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
)

_HOST = "https://api.enterprise.wikimedia.com"
_URL_PATH = "/v2/snapshots/{language}wiki_namespace_0/download"
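# For example, _get_base_url() for language="en" resolves to:
#   https://api.enterprise.wikimedia.com/v2/snapshots/enwiki_namespace_0/download
# (hyphens in language codes are replaced with underscores by _get_base_url).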

_VERSION = datasets.Version("5.0.0", "")


class WikipediaConfig(datasets.BuilderConfig):
    """BuilderConfig for Wikipedia."""

    def __init__(self, language=None, host=_HOST, version=_VERSION, **kwargs):
        """BuilderConfig for Wikipedia.

        Args:
            language (str): Language code of the Wikipedia snapshot to load.
            host (str, defaults to 'https://api.enterprise.wikimedia.com'):
                URL of the server that hosts the Wikimedia Enterprise API.
                It defaults to the official Wikimedia host.
            version (datasets.Version): Version of the dataset script.
            **kwargs: Keyword arguments forwarded to super.

        Attributes:
            date (str): Month of the Wikipedia Enterprise snapshot in YYYYMM format,
                derived from the current UTC date at instantiation time.
        """
        date = datetime.datetime.now(datetime.timezone.utc).strftime("%Y%m")
        super().__init__(
            name=f"{date}.{language}",
            description=f"Wikipedia dataset for {language}, parsed from {date} dump.",
            version=version,
            **kwargs,
        )
        self.date = date
        self.language = language
        self.host = host.rstrip("/")
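
# Note: config names are generated from the run date, e.g. language="en" executed in
# March 2024 yields the config name "202403.en". The snapshot actually downloaded is
# whichever one the Enterprise download endpoint currently serves (the URL carries no
# date parameter).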


class Wikipedia(datasets.GeneratorBasedBuilder):
    """Wikipedia dataset."""

    BUILDER_CONFIG_CLASS = WikipediaConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "id": datasets.Value("string"),
                    "version.id": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )
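
    # Each yielded example matches the features above; the values below are
    # illustrative only, not taken from a real snapshot:
    #   {
    #       "title": "Example article",
    #       "text": "Cleaned plaintext of the article...",
    #       "id": "12345",
    #       "version.id": "987654321",
    #       "url": "https://en.wikipedia.org/wiki/Example_article",
    #   }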

    def _get_base_url(self):
        return self.config.host + _URL_PATH.format(language=self.config.language.replace("-", "_"))

    def _split_generators(self, dl_manager):
        # The snapshot is a single archive of NDJSON files: iterate it directly in
        # streaming mode, otherwise download and extract it first.
        if dl_manager.is_streaming:
            data_path = dl_manager.download(self._get_base_url())
            files = dl_manager.iter_archive(data_path)
        else:
            data_path = dl_manager.download_and_extract(self._get_base_url())
            files = list(dl_manager.iter_files(data_path))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": files,
                },
            )
        ]

    def _generate_examples(self, files):
        for line in self._generate_lines(files):
            document = json.loads(line)
            title = document["name"]
            url = document["url"]
            identifier = document["identifier"]
            version_identifier = document.get("version", {}).get("identifier", "")
            article = mwparserfromhtml.Article(document["article_body"]["html"])
            text = html_to_text(article)
            # Skip articles whose cleaned text comes out empty.
            if text:
                yield f"{identifier}.{version_identifier}", {
                    "title": title,
                    "text": text,
                    "id": identifier,
                    "version.id": version_identifier,
                    "url": url,
                }

    @staticmethod
    def _generate_lines(files):
        if isinstance(files, list):
            # Non-streaming: `files` is a list of local paths to the extracted files.
            for path in files:
                with open(path, "rb") as file:
                    yield from file
        else:
            # Streaming: `files` is an iterable of (path, file object) pairs from iter_archive.
            for _, file in files:
                yield from file


def html_to_text(article: mwparserfromhtml.Article):
    """Convert Parsoid HTML to reasonable plaintext."""
    # Drop paragraphs that were transcluded from templates rather than written in the article.
    exclude_transcluded_paragraphs = True
    # Element types whose text is stripped from the output.
    exclude_elements = {
        "Category",
        "Citation",
        "Comment",
        "Heading",
        "Infobox",
        "List",
        "Math",
        "Media-audio",
        "Media-img",
        "Media-video",
        "Messagebox",
        "Navigational",
        "Note",
        "Reference",
        "TF-sup",
        "Table",
        "Wikitable",
    }
    # Ignore text that falls outside the article's paragraphs.
    exclude_para_context = {"pre-first-para", "between-paras", "post-last-para"}

    paragraphs = [
        paragraph.strip()
        for heading, paragraph in article.wikistew.get_plaintext(
            exclude_transcluded_paragraphs=exclude_transcluded_paragraphs,
            exclude_para_context=exclude_para_context,
            exclude_elements=exclude_elements,
        )
    ]
    if paragraphs:
        return "\n".join(paragraphs)
    return None
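
# A small, illustrative check of html_to_text. The HTML snippet is made up, not a real
# Enterprise snapshot payload, so the exact output is not guaranteed:
#
#   sample = mwparserfromhtml.Article("<html><body><p>Hello, <b>world</b>.</p></body></html>")
#   print(html_to_text(sample))  # cleaned paragraph text, or None if nothing survives the filters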