Datasets:

Languages:
Russian
Multilinguality:
monolingual
Size Categories:
10K<n<100K
Language Creators:
expert-generated
found
Annotations Creators:
expert-generated
found
Source Datasets:
original
ArXiv:
Tags:
License:
File size: 3,010 Bytes
9ee07df
6e34a78
9ee07df
 
 
 
 
 
 
 
 
 
 
 
fca2b57
 
 
9ee07df
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6e34a78
9ee07df
ebdb8f6
 
 
 
9ee07df
326d65c
 
 
9ee07df
 
326d65c
9ee07df
67fd067
9ee07df
 
6e34a78
9ee07df
 
6e34a78
9ee07df
 
 
 
326d65c
 
9ee07df
 
 
 
 
 
 
 
326d65c
9ee07df
 
 
 
 
ebdb8f6
9ee07df
ebdb8f6
 
 
9ee07df
 
ebdb8f6
9ee07df
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and Ilya Gusev
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Gazeta: Dataset for Automatic Summarization of Russian News"""


import json
import os

import datasets


# BibTeX entry for the paper that introduced the dataset (AINL 2020).
_CITATION = """
@InProceedings{10.1007/978-3-030-59082-6_9,
    author="Gusev, Ilya",
    editor="Filchenkov, Andrey and Kauttonen, Janne and Pivovarova, Lidia",
    title="Dataset for Automatic Summarization of Russian News",
    booktitle="Artificial Intelligence and Natural Language",
    year="2020",
    publisher="Springer International Publishing",
    address="Cham",
    pages="122--134",
    isbn="978-3-030-59082-6"
}
"""

# One-line description surfaced in the generated DatasetInfo.
_DESCRIPTION = "Dataset for automatic summarization of Russian news"
_HOMEPAGE = "https://github.com/IlyaGusev/gazeta"
# Per-split JSONL files, keyed by split alias; resolved via
# dl_manager.download_and_extract in GazetaDataset._split_generators.
_URLS = {
    "train": "gazeta_train.jsonl",
    "val": "gazeta_val.jsonl",
    "test": "gazeta_test.jsonl"
}
# JSON keys holding the source article body and its reference summary;
# also used as the supervised (input, target) key pair.
_DOCUMENT = "text"
_SUMMARY = "summary"


class GazetaDataset(datasets.GeneratorBasedBuilder):
    """Builder for Gazeta, a corpus for automatic summarization of Russian news.

    Each example is one JSON object per line with string fields: the article
    text, its summary, a title, a date, and the source URL.
    """

    VERSION = datasets.Version("2.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="default", version=VERSION, description=""),
    ]

    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        # All record fields are plain UTF-8 strings.
        feature_spec = {
            _DOCUMENT: datasets.Value("string"),
            _SUMMARY: datasets.Value("string"),
            "title": datasets.Value("string"),
            "date": datasets.Value("string"),
            "url": datasets.Value("string")
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(feature_spec),
            supervised_keys=(_DOCUMENT, _SUMMARY),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Resolve every split file up front; keys mirror _URLS.
        paths = dl_manager.download_and_extract(_URLS)
        split_plan = [
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.TEST, "test"),
            (datasets.Split.VALIDATION, "val"),
        ]
        return [
            datasets.SplitGenerator(name=split, gen_kwargs={"filepath": paths[key]})
            for split, key in split_plan
        ]

    def _generate_examples(self, filepath):
        """Yield (line_index, record) pairs, one per JSON line of *filepath*."""
        with open(filepath, encoding="utf-8") as handle:
            for idx, line in enumerate(handle):
                yield idx, json.loads(line)