File size: 3,707 Bytes
7b6463d
 
a7f7c1a
 
 
 
7fe34c2
a7f7c1a
 
 
 
 
 
 
 
7fe34c2
88e430c
 
 
7fe34c2
88e430c
7fe34c2
 
88e430c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7fe34c2
88e430c
 
7fe34c2
 
a7f7c1a
 
 
 
7fe34c2
a7f7c1a
7fe34c2
 
 
 
 
a7f7c1a
 
 
 
 
7fe34c2
 
a7f7c1a
 
 
88e430c
a7f7c1a
 
9869317
a7f7c1a
 
 
 
 
 
 
 
 
 
 
7fe34c2
88e430c
 
a7f7c1a
 
88e430c
 
a7f7c1a
7fe34c2
a7f7c1a
 
7fe34c2
 
 
 
 
 
 
 
ea2a8e3
a7f7c1a
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
from __future__ import annotations

import random
from pathlib import Path
from typing import Generator

import datasets

_CITATION = ""
_DESCRIPTION = "This is a dataset of livedoor news articles."
_HOMEPAGE = "https://www.rondhuit.com/download.html#news%20corpus"
_LICENSE = "This work is license under CC BY-ND 2.1 JP"
_URL = "https://www.rondhuit.com/download/ldcc-20140209.tar.gz"


class LivedoorNewsCorpusConfig(datasets.BuilderConfig):
    """BuilderConfig for the livedoor news corpus.

    Extends the stock ``datasets.BuilderConfig`` with split-control
    options: whether to shuffle before splitting, the RNG seed, and the
    train/validation ratios (the remainder becomes the test split).
    """

    def __init__(
        self,
        name: str = "default",
        version: datasets.Version | str | None = datasets.Version("0.0.0"),
        data_dir: str | None = None,
        data_files: datasets.data_files.DataFilesDict | None = None,
        description: str | None = _DESCRIPTION,
        shuffle: bool = True,
        seed: int = 42,
        train_ratio: float = 0.8,
        validation_ratio: float = 0.1,
    ) -> None:
        # Standard builder-config fields are forwarded to the base class
        # untouched; only the split-control options are kept here.
        super().__init__(
            name=name,
            version=version,
            data_dir=data_dir,
            data_files=data_files,
            description=description,
        )
        self.shuffle = shuffle  # shuffle articles before splitting
        self.seed = seed  # seed used when shuffle is enabled
        self.train_ratio = train_ratio  # fraction of data for the train split
        self.validation_ratio = validation_ratio  # fraction for validation


class LivedoorNewsCorpus(datasets.GeneratorBasedBuilder):
    """Dataset builder for the livedoor news corpus.

    Downloads and extracts the Rondhuit tarball, parses every article
    file (line 1: URL, line 2: date, line 3: title, rest: body), and
    produces train/validation/test splits sized by the config's
    ``train_ratio``/``validation_ratio``, optionally shuffled with the
    config's ``seed``.
    """

    BUILDER_CONFIG_CLASS = LivedoorNewsCorpusConfig

    def _info(self) -> datasets.DatasetInfo:
        """Return the dataset's static metadata and feature schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            features=datasets.Features(
                {
                    "url": datasets.Value("string"),
                    "date": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "content": datasets.Value("string"),
                    "category": datasets.Value("string"),
                }
            ),
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> list[datasets.SplitGenerator]:
        """Download the corpus and partition it into train/validation/test.

        Files are globbed in sorted order so the pre-shuffle ordering is
        deterministic across runs and platforms.
        """
        dataset_dir = Path(dl_manager.download_and_extract(_URL))

        data = []
        # Article files sit two levels below the extraction root:
        # <root>/<corpus-dir>/<category>/<article>.txt
        for file_name in sorted(dataset_dir.glob("*/*/*")):
            # Each category directory ships a LICENSE.txt; compare the
            # filename exactly rather than substring-matching the whole
            # path, which could skip unrelated files.
            if file_name.name == "LICENSE.txt":
                continue
            with open(file_name, "r", encoding="utf-8") as f:
                lines = [line.strip() for line in f]
            data.append(
                {
                    "url": lines[0],
                    "date": lines[1],
                    "title": lines[2],
                    "content": " ".join(lines[3:]),
                    # The parent directory name encodes the category.
                    "category": file_name.parent.name,
                }
            )

        if self.config.shuffle:
            # A private Random instance yields the exact same permutation
            # as random.seed()/random.shuffle() with the same seed, but
            # avoids reseeding the process-global RNG as a side effect.
            random.Random(self.config.seed).shuffle(data)

        num_data = len(data)
        num_train_data = int(num_data * self.config.train_ratio)
        num_validation_data = int(num_data * self.config.validation_ratio)
        train_data = data[:num_train_data]
        validation_data = data[num_train_data : num_train_data + num_validation_data]
        # Everything left over (including rounding remainders) is test data.
        test_data = data[num_train_data + num_validation_data :]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"data": train_data}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"data": validation_data}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"data": test_data}
            ),
        ]

    def _generate_examples(self, data: list[dict[str, str]]) -> Generator:
        """Yield ``(index, example)`` pairs for one split."""
        for i, d in enumerate(data):
            yield i, d