# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
"""Wikipedia dataset containing cleaned articles of all languages."""
import datetime
import json

import datasets
import mwparserfromhtml

logger = datasets.logging.get_logger(__name__)
# TODO: check
_HOMEPAGE = "https://enterprise.wikimedia.com/docs/snapshot/"
# TODO: check
_CITATION = """\
@ONLINE {wikidump,
author = {Wikimedia Enterprise},
title = {Wikimedia Snapshot API},
url = {https://enterprise.wikimedia.com/docs/snapshot/}
}
"""
# TODO: check
_DESCRIPTION = """\
Wikipedia dataset containing cleaned articles for all languages.
The datasets are built from the Wikimedia Enterprise HTML Snapshot API
(https://enterprise.wikimedia.com/docs/snapshot/) with one split per language. Each example
contains the content of one full Wikipedia article, cleaned to strip
markup and unwanted sections (references, etc.).
"""
# TODO: check
_LICENSE = (
"This work is licensed under the Creative Commons Attribution-ShareAlike "
"3.0 Unported License. To view a copy of this license, visit "
"http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
"Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
)
_HOST = "https://api.enterprise.wikimedia.com"
_URL_PATH = "/v2/snapshots/{language}wiki_namespace_0/download"
# Needs headers: {"Authorization": f"Bearer {ACCESS_TOKEN}"}
# See: https://enterprise.wikimedia.com/docs/#getting-api-keys
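
# Hedged sketch (illustration only; not used by the loader below): the raw HTTP
# request implied by _HOST/_URL_PATH and the header comment above. The `requests`
# dependency and the helper name are assumptions made just for this example.
def _example_snapshot_request(language, access_token):
    import requests  # illustration only; not a dependency of this script

    url = _HOST + _URL_PATH.format(language=language.replace("-", "_"))
    return requests.get(url, headers={"Authorization": f"Bearer {access_token}"}, stream=True)
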
_VERSION = datasets.Version("5.0.0", "")
class WikipediaConfig(datasets.BuilderConfig):
"""BuilderConfig for Wikipedia."""
def __init__(self, language=None, host=_HOST, version=_VERSION, **kwargs):
"""BuilderConfig for Wikipedia.
Args:
language (str): Language code for the Wikipedia dump to use.
date (str): Date of the Wikipedia dump in YYYYMMDD format. A list of
available dates can be found at https://dumps.wikimedia.org/enwiki/.
**kwargs: Keyword arguments forwarded to super.
Attributes:
date (str): Month of the Wikipedia Enterprise Snapshot in YYYYMMDD format.
host (str, defaults to 'https://api.enterprise.wikimedia.com'):
URL of the server that hosts the Wikimedia Enterprise API.
It defaults to the official Wikimedia host.
"""
date = datetime.datetime.now(datetime.timezone.utc).strftime("%Y%m")
super().__init__(
name=f"{date}.{language}",
description=f"Wikipedia dataset for {language}, parsed from {date} dump.",
version=version,
**kwargs,
)
# Wikimedia Enterprise updates free Snapshots monthly, so "date" is the current month, e.g. "202407"
self.date = date
self.language = language
self.host = host.rstrip("/")
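    # Grounded example: a config created in July 2024 as WikipediaConfig(language="en")
    # gets the name "202407.en" and keeps the default Enterprise host.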
class Wikipedia(datasets.GeneratorBasedBuilder):
"""Wikipedia dataset."""
BUILDER_CONFIG_CLASS = WikipediaConfig
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"title": datasets.Value("string"),
"text": datasets.Value("string"),
"id": datasets.Value("string"),
"version.id": datasets.Value("string"),
"url": datasets.Value("string"),
}
),
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _get_base_url(self):
return self.config.host + _URL_PATH.format(language=self.config.language.replace("-", "_"))
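    # Grounded example: with the default host, language "en" resolves to
    # https://api.enterprise.wikimedia.com/v2/snapshots/enwiki_namespace_0/download,
    # and a hyphenated code such as "be-tarask" maps to be_taraskwiki_namespace_0.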
def _split_generators(self, dl_manager):
if dl_manager.is_streaming:
data_path = dl_manager.download(self._get_base_url())
files = dl_manager.iter_archive(data_path)
else:
data_path = dl_manager.download_and_extract(self._get_base_url())
files = list(dl_manager.iter_files(data_path))
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"files": files,
},
)
]
def _generate_examples(self, files):
for line in self._generate_lines(files):
document = json.loads(line)
title = document["name"]
url = document["url"]
identifier = document["identifier"]
version_identifier = document.get("version", {}).get("identifier", "")
article = mwparserfromhtml.Article(document["article_body"]["html"])
text = html_to_text(article)
if text:
yield f"{identifier}.{version_identifier}", {
"title": title,
"text": text,
"id": identifier,
"version.id": version_identifier,
"url": url,
}
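    # Each downloaded snapshot file is newline-delimited JSON; the minimal record
    # shape consumed above (a sketch covering only the keys accessed here, not the
    # full Enterprise schema) is:
    # {"name": ..., "url": ..., "identifier": ...,
    #  "version": {"identifier": ...},
    #  "article_body": {"html": "<Parsoid HTML>"}}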
@staticmethod
def _generate_lines(files):
if isinstance(files, list): # list of iter_files paths
for path in files:
with open(path, "rb") as file:
yield from file
else: # iter_archive files
for _, file in files:
yield from file
def html_to_text(article: mwparserfromhtml.Article):
"""Convert Parsoid HTML to reasonable plaintext."""
    # This catches things like infoboxes or message boxes that may contain paragraph
    # elements but are fully transcluded, and so are probably boilerplate rather than
    # topic-specific article content.
exclude_transcluded_paragraphs = True
    # These elements generally are not text (e.g., citation footnotes like `[1]`)
    # or do not have well-formed text, such as Tables or Lists.
# A less conservative approach might retain Wikitables or Tables but apply some
# additional guardrails around the length of content from a specific list element
# or table cell to be included. In reality, that'd require re-writing the
# `get_plaintext` function:
# https://gitlab.wikimedia.org/repos/research/html-dumps/-/blob/main/src/mwparserfromhtml/parse/article.py?ref_type=heads#L325
exclude_elements = {
"Category",
"Citation",
"Comment",
"Heading",
"Infobox",
"List",
"Math",
"Media-audio",
"Media-img",
"Media-video",
"Messagebox",
"Navigational",
"Note",
"Reference",
"TF-sup", # superscript -- catches Citation-needed tags etc.
"Table",
"Wikitable",
}
# this ensures that only content that appears under a <p> element is retained.
# Much of this is redundant with the `exclude_elements` above and setting
# `exclude_transcluded_paragraphs` to True but this is a reasonable guardrail.
exclude_para_context = {"pre-first-para", "between-paras", "post-last-para"}
paragraphs = [
paragraph.strip()
for heading, paragraph in article.wikistew.get_plaintext(
exclude_transcluded_paragraphs=exclude_transcluded_paragraphs,
exclude_para_context=exclude_para_context,
exclude_elements=exclude_elements,
)
# if len(paragraph.strip()) > 15
]
    # Final check that at least one non-empty paragraph survived the filtering.
    # Stricter minimum-length checks are left disabled above (per paragraph) and
    # below (whole article); they mainly target bugs in the Enterprise dumps where
    # e.g. poorly-formatted redirects still manage to slip through.
if paragraphs:
plaintext = "\n".join(paragraphs)
# if len(plaintext) > 20:
# return plaintext
return plaintext
return None
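
if __name__ == "__main__":
    # Hedged smoke test: the toy HTML below is only a stand-in for a real Parsoid
    # article body, so the filters above may discard everything and print None; it
    # merely illustrates how html_to_text is called on an Enterprise record's
    # article_body HTML.
    sample_html = (
        '<html><body><section data-mw-section-id="0">'
        "<p>An example sentence long enough to resemble article prose.</p>"
        "</section></body></html>"
    )
    print(html_to_text(mwparserfromhtml.Article(sample_html)))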