# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Wikipedia dataset containing cleaned articles of all languages."""
import json
from tqdm import tqdm
import datasets
from pathlib import Path
logger = datasets.logging.get_logger(__name__)
_CITATION = """"""
_DESCRIPTION = """"""
_LICENSE = """"""
_VERSION = datasets.Version("2.0.0", "")
_NUM_SPLITS = 68


class WikipediaConfig(datasets.BuilderConfig):
    """BuilderConfig for Wikipedia."""

    def __init__(self, shard=None, version=_VERSION, **kwargs):
        """BuilderConfig for Wikipedia.

        Args:
            shard: int, shard number of the dump file to load.
            **kwargs: keyword arguments forwarded to super.
        """
super().__init__(
name=f"shard_{shard}",
description=f"Wikipedia dataset for split {shard}",
version=version,
**kwargs,
)
self.shard = shard


class Wikipedia(datasets.GeneratorBasedBuilder):
    """Wikipedia dataset."""

    BUILDER_CONFIG_CLASS = WikipediaConfig
    BUILDER_CONFIGS = [WikipediaConfig(shard=str(shard_id)) for shard_id in range(_NUM_SPLITS)]

def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"identifier": datasets.Value("string"),
"name": datasets.Value("string"),
"namespace_name": datasets.Value("string"),
"namespace_identifier": datasets.Value("string"),
"categories": [
{
"name": datasets.Value("string"),
"url": datasets.Value("string"),
}
],
"date_modified": datasets.Value("string"),
"url": datasets.Value("string"),
"html": datasets.Value("string"),
"wikitext": datasets.Value("string"),
"in_language": datasets.Value("string"),
"main_entity": {
"identifier": datasets.Value("string"),
"url": datasets.Value("string"),
},
"is_part_of": {
"name": datasets.Value("string"),
"identifier": datasets.Value("string"),
},
"license": [
{
"name": datasets.Value("string"),
"url": datasets.Value("string"),
"identifier": datasets.Value("string"),
}
],
}
),
# No default supervised_keys.
supervised_keys=None,
homepage="https://dumps.wikimedia.org",
citation=_CITATION,
)

    def _split_generators(self, dl_manager):
        # Each config reads a single local NDJSON shard from `data_dir`; nothing is downloaded.
        data_paths = [Path(self.config.data_dir) / f"enwiki_{self.config.shard}.ndjson"]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepaths": data_paths}
            )
        ]

    def _generate_examples(self, filepaths):
        for filepath in filepaths:
            with open(filepath, "r", encoding="utf-8") as f:
                for line in tqdm(f):
                    example = json.loads(line)
                    # Flatten the nested Enterprise dump record into the flat feature schema.
                    clean_example = {}
                    clean_example["name"] = example["name"]
                    clean_example["identifier"] = example["identifier"]
                    clean_example["date_modified"] = example["date_modified"]
                    clean_example["namespace_name"] = example["namespace"]["name"]
                    clean_example["namespace_identifier"] = example["namespace"]["identifier"]
                    clean_example["categories"] = example.get("categories", None)
                    clean_example["url"] = example["url"]
                    clean_example["html"] = example["article_body"]["html"]
                    clean_example["wikitext"] = example["article_body"]["wikitext"]
                    clean_example["in_language"] = example["in_language"]
                    clean_example["main_entity"] = example.get("main_entity", None)
                    clean_example["is_part_of"] = example["is_part_of"]
                    clean_example["license"] = example["license"]
                    yield clean_example["identifier"], clean_example
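

# A minimal usage sketch (not part of the loading script): loading one shard with
# `datasets.load_dataset`. The script path and `data_dir` below are hypothetical
# placeholders; the `shard` kwarg is forwarded to WikipediaConfig above.
#
#   from datasets import load_dataset
#
#   ds = load_dataset(
#       "path/to/wikipedia_html_enterprise.py",  # this script
#       shard="1",                                # selects enwiki_1.ndjson
#       data_dir="path/to/enterprise_shards",     # directory holding the NDJSON shards
#       split="train",
#   )
#   print(ds[0]["name"], ds[0]["url"])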