"""Bengali wikipedia from 03/20/2021"""

import os
import pyarrow.parquet as pq

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@ONLINE {wikidump,
    author = {Wikimedia Foundation},
    title  = {Wikimedia Downloads},
    url    = {https://dumps.wikimedia.org}
}
"""

_DESCRIPTION = """\
Bengali Wikipedia from the dump of 03/20/2021.
The data was processed with the Hugging Face datasets wikipedia script in early April 2021.
The dataset was built from the Wikipedia dump (https://dumps.wikimedia.org/).
Each example contains the content of one full Wikipedia article, cleaned to strip
markup and unwanted sections (references, etc.).
"""

_LICENSE = (
    "This work is licensed under the Creative Commons Attribution-ShareAlike "
    "3.0 Unported License. To view a copy of this license, visit "
    "http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
    "Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
)

_N_SHARDS = 10

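# Relative shard paths; dl_manager resolves these against the dataset
# repository, e.g. "data/20210320/shard-00000-of-00010.parquet".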
_URLS = {
    "train": [f"data/20210320/shard-{i:05d}-of-{_N_SHARDS:05d}.parquet" for i in range(_N_SHARDS)],
}


class WikipediaBn(datasets.ArrowBasedBuilder):
    """SQUAD: The Stanford Question Answering Dataset. Version 1.1."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            # No default supervised_keys: articles are plain text,
            # not (input, label) pairs.
            supervised_keys=None,
            homepage="https://dumps.wikimedia.org",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files["train"]}),
        ]

    def _generate_tables(self, filepaths):
        """This function returns the examples in the raw (text) form."""
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            filepath_id = os.path.basename(filepath)
            with open(filepath, "rb") as f:
                pf = pq.ParquetFile(f)
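                # Iterate row groups to stream the shard instead of loading
                # the whole Parquet file into memory at once.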
                for i in range(pf.num_row_groups):
                    id_ = f"{filepath_id}_{i}"
                    yield id_, pf.read_row_group(i)
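

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original script: load the dataset
    # through this loading script, assuming a `datasets` version that still
    # supports script-based datasets and that the Parquet shards listed in
    # _URLS exist relative to this file.
    from datasets import load_dataset

    ds = load_dataset(__file__, split="train")
    print(ds[0]["title"])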