IlyaGusev committed
Commit
9ee07df
1 Parent(s): 55280de
Files changed (2)
  1. dataset_infos.json +1 -0
  2. gazeta.py +125 -0
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"github.com": {"description": "Dataset for automatic summarization of Russian news\n", "citation": "\n@InProceedings{10.1007/978-3-030-59082-6_9,\n author=\"Gusev, Ilya\",\n editor=\"Filchenkov, Andrey and Kauttonen, Janne and Pivovarova, Lidia\",\n title=\"Dataset for Automatic Summarization of Russian News\",\n booktitle=\"Artificial Intelligence and Natural Language\",\n year=\"2020\",\n publisher=\"Springer International Publishing\",\n address=\"Cham\",\n pages=\"122--134\",\n isbn=\"978-3-030-59082-6\"\n}\n", "homepage": "https://github.com/IlyaGusev/gazeta", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "date": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "gazeta_dataset", "config_name": "github.com", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 468510447, "num_examples": 52400, "dataset_name": "gazeta_dataset"}, "test": {"name": "test", "num_bytes": 51888697, "num_examples": 5770, "dataset_name": "gazeta_dataset"}, "validation": {"name": "validation", "num_bytes": 48394731, "num_examples": 5265, "dataset_name": "gazeta_dataset"}}, "download_checksums": {"https://github.com/IlyaGusev/gazeta/releases/download/0.1/gazeta_jsonl.tar.gz": {"num_bytes": 158907783, "checksum": "43787350a7690803f6a50b99976c1884abf2d3a0246cea2a68e394b2b7d7fa66"}}, "download_size": 158907783, "post_processing_size": null, "dataset_size": 568793875, "size_in_bytes": 727701658}}
gazeta.py ADDED
@@ -0,0 +1,125 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Dataset for automatic summarization of Russian news"""
+
+
+ import json
+ import os
+
+ import datasets
+
+
+ _CITATION = """
+ @InProceedings{10.1007/978-3-030-59082-6_9,
+     author="Gusev, Ilya",
+     editor="Filchenkov, Andrey and Kauttonen, Janne and Pivovarova, Lidia",
+     title="Dataset for Automatic Summarization of Russian News",
+     booktitle="Artificial Intelligence and Natural Language",
+     year="2020",
+     publisher="Springer International Publishing",
+     address="Cham",
+     pages="122--134",
+     isbn="978-3-030-59082-6"
+ }
+ """
+
+ _DESCRIPTION = """\
+ Dataset for automatic summarization of Russian news
+ """
+
+ _HOMEPAGE = "https://github.com/IlyaGusev/gazeta"
+
+ _LICENSE = ""
+
+ _URLs = {
+     "github.com": "https://github.com/IlyaGusev/gazeta/releases/download/0.1/gazeta_jsonl.tar.gz",
+ }
+
+
+ class GazetaDataset(datasets.GeneratorBasedBuilder):
+     """Dataset for automatic summarization of Russian news"""
+
+     VERSION = datasets.Version("1.1.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="github.com", version=VERSION, description=""),
+     ]
+
+     # A default configuration is optional; use one when it makes sense.
+     DEFAULT_CONFIG_NAME = "github.com"
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "text": datasets.Value("string"),
+                 "summary": datasets.Value("string"),
+                 "title": datasets.Value("string"),
+                 "date": datasets.Value("string"),
+                 "url": datasets.Value("string"),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # dl_manager downloads and extracts the release archive and returns
+         # the path to the cached folder holding the extracted JSONL files.
+         my_urls = _URLs[self.config.name]
+         data_dir = dl_manager.download_and_extract(my_urls)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "gazeta_train.jsonl"),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "gazeta_test.jsonl"),
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "gazeta_val.jsonl"),
+                     "split": "dev",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split):
+         # Each line of the JSONL file is one article record; the line
+         # index serves as the example key.
+         with open(filepath, encoding="utf-8") as f:
+             for id_, row in enumerate(f):
+                 data = json.loads(row)
+                 yield id_, data
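Note that _generate_examples parses each line of the extracted JSONL files with json.loads and yields the object unchanged, so every line must be a JSON object whose keys match the features declared in _info. A sketch of the expected record shape, with all field values invented for illustration:

    import json

    # One hypothetical line of gazeta_train.jsonl (values are placeholders, not real data).
    line = (
        '{"text": "Full article text ...", '
        '"summary": "Short summary ...", '
        '"title": "Article title", '
        '"date": "2019-06-01 12:00:00", '
        '"url": "https://www.gazeta.ru/..."}'
    )
    record = json.loads(line)
    assert set(record) == {"text", "summary", "title", "date", "url"}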