# wikipedia/wikipedia.py
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
"""Wikipedia dataset containing cleaned articles of all languages."""
import datetime
import json

import datasets
import mwparserfromhtml

logger = datasets.logging.get_logger(__name__)

# TODO: check
_HOMEPAGE = "https://enterprise.wikimedia.com/docs/snapshot/"
# TODO: check
_CITATION = """\
@ONLINE {wikidump,
author = {Wikimedia Enterprise},
title = {Wikimedia Snapshot API},
url = {https://enterprise.wikimedia.com/docs/snapshot/}
}
"""
# TODO: check
_DESCRIPTION = """\
Wikipedia dataset containing cleaned articles for all languages.
The datasets are built from the Wikimedia Enterprise HTML Snapshot API
(https://enterprise.wikimedia.com/docs/snapshot/), with one configuration per language
and a single "train" split. Each example contains the content of one full Wikipedia
article, cleaned to strip markup and unwanted sections (references, etc.).
"""
# TODO: check
_LICENSE = (
"This work is licensed under the Creative Commons Attribution-ShareAlike "
"3.0 Unported License. To view a copy of this license, visit "
"http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
"Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
)
_HOST = "https://api.enterprise.wikimedia.com"
_URL_PATH = "/v2/snapshots/{language}wiki_namespace_0/download"
# Needs headers: {"Authorization": f"Bearer {ACCESS_TOKEN}"}
# See: https://enterprise.wikimedia.com/docs/#getting-api-keys
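
# Illustrative helper (a sketch, not used by the builder): fetching a snapshot manually
# with `requests`, passing the Bearer token described above. The function name, its
# parameters, and the chunk size are assumptions for demonstration only; `requests` is
# not otherwise required by this script.
def _download_snapshot_example(language: str, access_token: str, output_path: str) -> None:
    import requests

    url = _HOST + _URL_PATH.format(language=language.replace("-", "_"))
    headers = {"Authorization": f"Bearer {access_token}"}
    with requests.get(url, headers=headers, stream=True) as response:
        response.raise_for_status()
        with open(output_path, "wb") as f:
            # Stream the (large) archive to disk in 1 MiB chunks.
            for chunk in response.iter_content(chunk_size=1 << 20):
                f.write(chunk)
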
_VERSION = datasets.Version("5.0.0", "")
class WikipediaConfig(datasets.BuilderConfig):
"""BuilderConfig for Wikipedia."""
def __init__(self, language=None, host=_HOST, version=_VERSION, **kwargs):
"""BuilderConfig for Wikipedia.
Args:
language (str): Language code for the Wikipedia dump to use.
date (str): Date of the Wikipedia dump in YYYYMMDD format. A list of
available dates can be found at https://dumps.wikimedia.org/enwiki/.
**kwargs: Keyword arguments forwarded to super.
Attributes:
date (str): Month of the Wikipedia Enterprise Snapshot in YYYYMMDD format.
host (str, defaults to 'https://api.enterprise.wikimedia.com'):
URL of the server that hosts the Wikimedia Enterprise API.
It defaults to the official Wikimedia host.
"""
date = datetime.datetime.now(datetime.timezone.utc).strftime("%Y%m")
super().__init__(
name=f"{date}.{language}",
description=f"Wikipedia dataset for {language}, parsed from {date} dump.",
version=version,
**kwargs,
)
# Wikimedia Enterprise updates free Snapshots monthly, so "date" is the current month, e.g. "202407"
self.date = date
self.language = language
self.host = host.rstrip("/")
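
# Illustrative usage (a sketch, not used by the builder): `datasets.load_dataset` forwards
# extra keyword arguments such as `language` to WikipediaConfig, producing a config named
# like "202407.en". The local script path and streaming flag below are assumptions for
# demonstration; the Wikimedia Enterprise access token still has to be supplied for the
# download itself (see the Authorization note above).
def _example_load(script_path: str = "wikipedia.py", language: str = "en"):
    return datasets.load_dataset(script_path, language=language, split="train", streaming=True)
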
class Wikipedia(datasets.GeneratorBasedBuilder):
"""Wikipedia dataset."""
BUILDER_CONFIG_CLASS = WikipediaConfig
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"title": datasets.Value("string"),
"text": datasets.Value("string"),
"id": datasets.Value("string"),
"version.id": datasets.Value("string"),
"url": datasets.Value("string"),
}
),
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _get_base_url(self):
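        # For language="en" this yields:
        # https://api.enterprise.wikimedia.com/v2/snapshots/enwiki_namespace_0/download
        # Hyphens in the language code are replaced with underscores when building the
        # snapshot name.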
return self.config.host + _URL_PATH.format(language=self.config.language.replace("-", "_"))
def _split_generators(self, dl_manager):
if dl_manager.is_streaming:
data_path = dl_manager.download(self._get_base_url())
files = dl_manager.iter_archive(data_path)
else:
data_path = dl_manager.download_and_extract(self._get_base_url())
files = list(dl_manager.iter_files(data_path))
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"files": files,
},
)
]
def _generate_examples(self, files):
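        # Each line is one NDJSON article record from the Enterprise snapshot; only the
        # fields read below are relied upon: "name", "identifier", "url",
        # "version" -> "identifier", and "article_body" -> "html".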
for line in self._generate_lines(files):
document = json.loads(line)
title = document["name"]
url = document["url"]
identifier = document["identifier"]
version_identifier = document.get("version", {}).get("identifier", "")
article = mwparserfromhtml.Article(document["article_body"]["html"])
text = html_to_text(article)
if text:
yield f"{identifier}.{version_identifier}", {
"title": title,
"text": text,
"id": identifier,
"version.id": version_identifier,
"url": url,
}
@staticmethod
def _generate_lines(files):
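        # Yields raw NDJSON lines (as bytes), either from extracted file paths
        # (non-streaming mode) or from the (path, file object) pairs produced by
        # `dl_manager.iter_archive` (streaming mode).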
if isinstance(files, list): # list of iter_files paths
for path in files:
with open(path, "rb") as file:
yield from file
else: # iter_archive files
for _, file in files:
yield from file
def html_to_text(article: mwparserfromhtml.Article):
"""Convert Parsoid HTML to reasonable plaintext."""
# this catches things like infoboxes or message boxes that might have paragraph elements
# within them but are fully-transcluded and so are probably boilerplate messages and
# unlikely to be topic-specific article content.
exclude_transcluded_paragraphs = True
# these elements generally are not text (e.g., Citations footnotes like `[1]`)
# or do not have well-formed text such as Tables or Lists.
# A less conservative approach might retain Wikitables or Tables but apply some
# additional guardrails around the length of content from a specific list element
# or table cell to be included. In reality, that'd require re-writing the
# `get_plaintext` function:
# https://gitlab.wikimedia.org/repos/research/html-dumps/-/blob/main/src/mwparserfromhtml/parse/article.py?ref_type=heads#L325
exclude_elements = {
"Category",
"Citation",
"Comment",
"Heading",
"Infobox",
"List",
"Math",
"Media-audio",
"Media-img",
"Media-video",
"Messagebox",
"Navigational",
"Note",
"Reference",
"TF-sup", # superscript -- catches Citation-needed tags etc.
"Table",
"Wikitable",
}
# this ensures that only content that appears under a <p> element is retained.
# Much of this is redundant with the `exclude_elements` above and setting
# `exclude_transcluded_paragraphs` to True but this is a reasonable guardrail.
exclude_para_context = {"pre-first-para", "between-paras", "post-last-para"}
paragraphs = [
paragraph.strip()
for heading, paragraph in article.wikistew.get_plaintext(
exclude_transcluded_paragraphs=exclude_transcluded_paragraphs,
exclude_para_context=exclude_para_context,
exclude_elements=exclude_elements,
)
# if len(paragraph.strip()) > 15
]
    # Final guardrail: require at least one retained paragraph before returning text.
    # (A stricter minimum-length check, e.g. more than 20 characters, is sketched below
    # but currently disabled.) This mainly catches bugs in the Enterprise dumps where
    # e.g. poorly-formatted redirects still manage to slip through.
if paragraphs:
plaintext = "\n".join(paragraphs)
# if len(plaintext) > 20:
# return plaintext
return plaintext
return None
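

# Illustrative check (a sketch, not used by the builder): running `html_to_text` on a
# minimal hand-written HTML snippet. The snippet is an assumption about Parsoid-style
# output, purely for demonstration; real snapshot HTML is far richer, so the filters
# above may drop such a toy input entirely and return None.
def _example_html_to_text():
    html = "<html><body><section><p>Example paragraph about an example topic.</p></section></body></html>"
    return html_to_text(mwparserfromhtml.Article(html))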