from __future__ import annotations

import random
from pathlib import Path
from typing import Generator

import datasets

_CITATION = ""
_DESCRIPTION = "This is a dataset of livedoor news articles."
_HOMEPAGE = "https://www.rondhuit.com/download.html#news%20corpus"
_LICENSE = "This work is licensed under CC BY-ND 2.1 JP"
_URL = "https://www.rondhuit.com/download/ldcc-20140209.tar.gz"


class LivedoorNewsCorpusConfig(datasets.BuilderConfig):
    """BuilderConfig for the livedoor news corpus.

    Adds options controlling whether the articles are shuffled and how they
    are split into train/validation/test.
    """

    def __init__(
        self,
        name: str = "default",
        version: datasets.Version | str | None = datasets.Version("0.0.0"),
        data_dir: str | None = None,
        data_files: datasets.data_files.DataFilesDict | None = None,
        description: str | None = _DESCRIPTION,
        shuffle: bool = True,
        seed: int = 42,
        train_ratio: float = 0.8,
        validation_ratio: float = 0.1,
    ) -> None:
        super().__init__(
            name=name,
            version=version,
            data_dir=data_dir,
            data_files=data_files,
            description=description,
        )
        self.shuffle = shuffle
        self.seed = seed
        self.train_ratio = train_ratio
        self.validation_ratio = validation_ratio


class LivedoorNewsCorpus(datasets.GeneratorBasedBuilder):
    """Livedoor news corpus builder that generates shuffled, ratio-based splits."""

    BUILDER_CONFIG_CLASS = LivedoorNewsCorpusConfig

    def _info(self) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            features=datasets.Features(
                {
                    "url": datasets.Value("string"),
                    "date": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "content": datasets.Value("string"),
                    "category": datasets.Value("string"),
                }
            ),
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> list[datasets.SplitGenerator]:
        dataset_dir = Path(dl_manager.download_and_extract(_URL))

        # Each article is a plain-text file two directory levels below the
        # extraction root, grouped into one sub-directory per news category.
        data = []
        for file_name in sorted(dataset_dir.glob("*/*/*")):
            # Skip the per-category LICENSE.txt files.
            if "LICENSE.txt" in str(file_name):
                continue
            with open(file_name, "r", encoding="utf-8") as f:
                d = [line.strip() for line in f]
                # Article files start with the URL, date, and title, followed
                # by the body text on the remaining lines.
                data.append(
                    {
                        "url": d[0],
                        "date": d[1],
                        "title": d[2],
                        "content": " ".join(d[3:]),
                        "category": file_name.parent.name,
                    }
                )

        # Shuffle deterministically so that the splits are reproducible.
        if self.config.shuffle:
            random.seed(self.config.seed)
            random.shuffle(data)

        # Slice the articles into train/validation/test according to the
        # configured ratios; whatever remains after train and validation goes
        # to the test split.
        num_data = len(data)
        num_train_data = int(num_data * self.config.train_ratio)
        num_validation_data = int(num_data * self.config.validation_ratio)
        train_data = data[:num_train_data]
        validation_data = data[num_train_data : num_train_data + num_validation_data]
        test_data = data[num_train_data + num_validation_data :]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"data": train_data}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"data": validation_data}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"data": test_data}
            ),
        ]

    def _generate_examples(self, data: list[dict[str, str]]) -> Generator:
        for i, d in enumerate(data):
            yield i, d
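

# Minimal usage sketch: load this builder as a local dataset script and pass
# the custom config options through load_dataset keyword arguments. This
# assumes a datasets release (2.x) that still supports loading from a local
# script; on other versions trust_remote_code may be unnecessary or
# unsupported, so treat this block as an illustration only.
if __name__ == "__main__":
    dataset = datasets.load_dataset(
        __file__,
        shuffle=True,
        seed=42,
        train_ratio=0.8,
        validation_ratio=0.1,
        trust_remote_code=True,  # needed on recent 2.x releases; drop on older ones
    )
    print(dataset)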