from math import ceil
from typing import List

import datasets
from datasets import load_dataset

from .configs import SUB_DATASETS
from .process import process_text, get_structured_data


def processing(data, name):
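    """Apply the per-config transformation to a single example.

    "processed" cleans the raw markup in ``data["text"]``; "structured"
    additionally extracts item/content fields into ``data["structured_text"]``.
    Any other config name returns the example unchanged.
    """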
    if name == "processed":
        data["text"] = process_text(data["text"])
    elif name == "structured":
        data["text"] = process_text(data["text"])
        data["structured_text"] = get_structured_data(data["text"], default_value={"item": [], "content": []})
    return data


def sliding(texts: List[str], window_size: int = 5, stride: int = 3) -> List[List[str]]:
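    """Split ``texts`` into overlapping windows of ``window_size`` items,
    advancing ``stride`` items between consecutive windows.

    >>> sliding(["a", "b", "c", "d", "e", "f", "g"], window_size=3, stride=2)
    [['a', 'b', 'c'], ['c', 'd', 'e'], ['e', 'f', 'g']]
    """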
    # Number of windows needed to cover texts end-to-end; at least one, so
    # inputs shorter than window_size are not silently dropped.
    n_iter = max(ceil((len(texts) - window_size) / stride) + 1, 1)
    return [texts[i * stride : i * stride + window_size] for i in range(n_iter)]


class NamuWiki(datasets.GeneratorBasedBuilder):
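    """Dataset builder for NamuWiki text.

    Each entry of ``SUB_DATASETS`` defines one config: "raw", "processed",
    "structured", or a chunked variant whose name starts with "char"/"word",
    together with its ``data_url``, ``features``, ``url``, and ``citation``.
    """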

    BUILDER_CONFIGS = SUB_DATASETS

    def _info(self):
        return datasets.DatasetInfo(
            description="",
            features=self.config.features,
            homepage=self.config.url,
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
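        """Download the configured data file(s) and expose a single train split."""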
        # processing() also supports the "structured" config, which follows
        # the same download-and-generate path as "processed".
        if self.config.name in ("processed", "structured"):
            data_file = dl_manager.download(self.config.data_url)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "data_file": data_file,
                        "split": "train",
                    },
                ),
            ]
        elif self.config.name.startswith(("char", "word")):
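            # Config names such as "char-512" or "word-64" encode the chunk
            # length after the dash; it is parsed out and forwarded to
            # _generate_examples via gen_kwargs.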
            _, length = self.config.name.split("-")
            length = int(length)
            data_file = dl_manager.download(self.config.data_url)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "data_file": data_file,
                        "split": "train",
                        "length": length,
                    },
                ),
            ]
        elif self.config.name == "raw":
            data_files = dl_manager.download_and_extract(self.config.data_url)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "data_file": data_files,
                        "split": "train",
                    },
                ),
            ]
        raise ValueError(f"Unsupported config name: {self.config.name}")

    def _generate_examples(self, data_file, split, length=None):
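        """Yield (key, example) pairs from the downloaded parquet file.

        ``length`` is forwarded by the "char-*"/"word-*" configs but is not
        used here; ``split`` is likewise accepted only to match gen_kwargs.
        """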
        _dataset = load_dataset("parquet", data_files={"train": data_file}, split="train", use_auth_token=self.use_auth_token)
        for n, data in enumerate(_dataset):
            yield n, processing(data, self.config.name)
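

# Minimal usage sketch (the path below is a placeholder for wherever this
# loading script lives, e.g. a local directory or its Hub repo id):
#
#     from datasets import load_dataset
#     ds = load_dataset("path/to/namuwiki_script", "processed", split="train")
#     print(ds[0]["text"])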