import os
from collections import defaultdict
from math import ceil
from typing import List

import datasets

from .configs import SUB_DATASETS
from .process import get_structured_data, process_text

def processing(data, name):
    """Post-process a batch of examples according to the config name."""
    if name == "processed":
        data["text"] = [process_text(text) for text in data["text"]]
    elif name == "structured":
        data["text"] = [process_text(text) for text in data["text"]]
        data["structured_text"] = [
            get_structured_data(text, default_value={"item": [], "content": []})
            for text in data["text"]
        ]
    return data
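
# Batched usage sketch (illustrative only; the actual cleaning behavior
# depends on `process_text` in the .process module):
#
#   batch = {"text": ["'''raw''' [[namu markup]] ..."]}
#   batch = processing(batch, "processed")
#   # batch["text"] now holds the cleaned strings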


def sliding(texts: List[str], window_size: int = 5, stride: int = 3) -> List[List[str]]:
    """Split `texts` into overlapping windows of `window_size`, advancing by `stride`."""
    # Number of windows needed to cover the whole list; the last one may be shorter.
    n_iter = ceil((len(texts) - window_size) / stride) + 1
    return [texts[i * stride : i * stride + window_size] for i in range(n_iter)]
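
# Example: with window_size=3 and stride=2, a seven-item list is covered by
# three overlapping windows (the values below are illustrative):
#
#   >>> sliding(list("abcdefg"), window_size=3, stride=2)
#   [['a', 'b', 'c'], ['c', 'd', 'e'], ['e', 'f', 'g']]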

class NamuWiki(datasets.GeneratorBasedBuilder):
    """NamuWiki dump loader with several preprocessing configurations."""

    BUILDER_CONFIGS = SUB_DATASETS

    def _info(self):
        return datasets.DatasetInfo(
            description="NamuWiki database dump (20210301 snapshot).",
            features=self.config.features,
            homepage=self.config.url,
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # "structured" shares the same download as "processed"; the two only
        # differ in the post-processing applied in `processing`.
        if self.config.name in ("processed", "structured"):
            data_file = dl_manager.download(self.config.data_url)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"data_file": data_file, "split": "train"},
                ),
            ]

        elif self.config.name.startswith(("char", "word")):
            # Config names look like "char-128" or "word-64"; the suffix is the
            # segment length, forwarded to `_generate_examples`.
            _, length = self.config.name.split("-")
            data_file = dl_manager.download(self.config.data_url)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"data_file": data_file, "split": "train", "length": int(length)},
                ),
            ]

        elif self.config.name == "raw":
            data_file = dl_manager.download_and_extract(self.config.data_url)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "data_file": os.path.join(data_file, "namuwiki_20210301.json"),
                        "split": "train",
                    },
                ),
            ]

        raise ValueError(f"Unknown config name: {self.config.name}")

    def _generate_examples(self, data_file, split, length=None):
        """Stream NamuWiki articles from the JSON dump with ijson.

        `length` is forwarded by the char/word configs.
        """
        try:
            import ijson  # streaming parser; the dump is too large to load at once
        except ImportError as e:
            raise ImportError(
                "This dataset script requires ijson; install it with `pip install ijson`."
            ) from e

        _TARGET = {"title", "text", "contributors.item"}
        n, output = 0, defaultdict(list)
        with open(data_file) as f:
            # ijson.parse yields (prefix, event, value) triples for every JSON token.
            for key, event, value in ijson.parse(f):
                key = key.replace("item.", "")
                # Each article starts with a "namespace" key, so its appearance
                # means the previously accumulated article is complete.
                if key == "namespace" and len(output):
                    output = {k: (v[0] if k != "contributors" else v) for k, v in output.items()}
                    yield n, processing(output, self.config.name)
                    output = defaultdict(list)
                    n += 1
                elif key in _TARGET:
                    output[key.replace(".item", "")].append(value)
        # Flush the last article, which is not followed by another "namespace" key.
        if len(output):
            output = {k: (v[0] if k != "contributors" else v) for k, v in output.items()}
            yield n, processing(output, self.config.name)
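
# Usage sketch (hypothetical local path; valid config names are defined in
# SUB_DATASETS, e.g. "processed" or "raw"):
#
#   from datasets import load_dataset
#   ds = load_dataset("path/to/this/script", "processed", split="train")
#   print(ds[0]["title"])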