wikipedia_bn/wikipedia_bn.py

"""Bengali wikipedia from 03/20/2021"""
import os
import pyarrow.parquet as pq
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@ONLINE {wikidump,
author = {Wikimedia Foundation},
title = {Wikimedia Downloads},
url = {https://dumps.wikimedia.org}
}
"""
_DESCRIPTION = """\
Bengali Wikipedia from the dump of 03/20/2021.
The data was processed using the huggingface datasets wikipedia script early april 2021.
The dataset was built from the Wikipedia dump (https://dumps.wikimedia.org/).
Each example contains the content of one full Wikipedia article with cleaning to strip
markdown and unwanted sections (references, etc.).
"""
_LICENSE = (
"This work is licensed under the Creative Commons Attribution-ShareAlike "
"3.0 Unported License. To view a copy of this license, visit "
"http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
"Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
)
_N_SHARDS = 10
_URLS = {
"train": [f"data/20210320/shard-{i:05d}-of-{_N_SHARDS:05d}.parquet" for i in range(_N_SHARDS)],
}
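
# Shard layout implied by the pattern above: ten Parquet files under
# data/20210320/ (shard-00000-of-00010.parquet, ...). These relative paths are
# resolved by the download manager against the dataset repository itself.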
class WikipediaBn(datasets.ArrowBasedBuilder):
    """Bengali Wikipedia dataset built from the 2021-03-20 dump."""
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            # No default supervised_keys: this is a plain text corpus with no
            # input/target pairs.
            supervised_keys=None,
            homepage="https://dumps.wikimedia.org",
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        # download_and_extract mirrors the structure of _URLS, returning the
        # local paths of the downloaded Parquet shards.
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files["train"]}),
        ]
    def _generate_tables(self, filepaths):
        """Yield (key, pyarrow.Table) pairs, one table per Parquet row group."""
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            filepath_id = os.path.basename(filepath)
            with open(filepath, "rb") as f:
                pf = pq.ParquetFile(f)
                for i in range(pf.num_row_groups):
                    id_ = f"{filepath_id}_{i}"
                    yield id_, pf.read_row_group(i)
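

# A minimal usage sketch (the repo id "wikipedia_bn" below is a placeholder;
# substitute the actual Hub id or local path where this script lives):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("wikipedia_bn", split="train")
#     print(ds[0]["title"])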