system HF staff committed on
Commit
4c1f667
0 Parent(s):

Update files from the datasets library (from 1.0.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

Files changed (4)
  1. .gitattributes +27 -0
  2. dataset_infos.json +1 -0
  3. dummy/1.0.0/dummy_data.zip +3 -0
  4. newsroom.py +144 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
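
Every pattern above carries the filter=lfs attribute, so matching files are stored as Git LFS pointers rather than regular blobs. A minimal sketch of listing those patterns from a local checkout (the helper name is illustrative, not part of the repo):

def lfs_patterns(path=".gitattributes"):
    # Collect the glob patterns that carry the filter=lfs attribute.
    patterns = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            parts = line.split()
            if parts and "filter=lfs" in parts[1:]:
                patterns.append(parts[0])
    return patterns

print(lfs_patterns())  # ['*.7z', '*.arrow', ..., '*tfevents*']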
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "\nNEWSROOM is a large dataset for training and evaluating summarization systems.\nIt contains 1.3 million articles and summaries written by authors and\neditors in the newsrooms of 38 major publications.\n\nDataset features include:\n - text: Input news text.\n - summary: Summary for the news.\nAnd additional features:\n - title: news title.\n - url: url of the news.\n - date: date of the article.\n - density: extractive density.\n - coverage: extractive coverage.\n - compression: compression ratio.\n - density_bin: low, medium, high.\n - coverage_bin: extractive, abstractive.\n - compression_bin: low, medium, high.\n\nThis dataset can be downloaded upon request. Unzip all the contents\n\"train.jsonl, dev.jsonl, test.jsonl\" to the tfds folder.\n\n", "citation": "\n@inproceedings{N18-1065,\n author = {Grusky, Max and Naaman, Mor and Artzi, Yoav},\n title = {NEWSROOM: A Dataset of 1.3 Million Summaries\n with Diverse Extractive Strategies},\n booktitle = {Proceedings of the 2018 Conference of the\n North American Chapter of the Association for\n Computational Linguistics: Human Language Technologies},\n year = {2018},\n}\n", "homepage": "https://summari.es", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "date": {"dtype": "string", "id": null, "_type": "Value"}, "density_bin": {"dtype": "string", "id": null, "_type": "Value"}, "coverage_bin": {"dtype": "string", "id": null, "_type": "Value"}, "compression_bin": {"dtype": "string", "id": null, "_type": "Value"}, "density": {"dtype": "float32", "id": null, "_type": "Value"}, "coverage": {"dtype": "float32", "id": null, "_type": "Value"}, "compression": {"dtype": "float32", "id": null, "_type": "Value"}}, "supervised_keys": {"input": "text", "output": "summary"}, "builder_name": "newsroom", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 472446866, "num_examples": 108862, "dataset_name": "newsroom"}, "train": {"name": "train", "num_bytes": 4357506078, "num_examples": 995041, "dataset_name": "newsroom"}, "validation": {"name": "validation", "num_bytes": 473206951, "num_examples": 108837, "dataset_name": "newsroom"}}, "download_checksums": {}, "download_size": 0, "dataset_size": 5303159895, "size_in_bytes": 5303159895}}
dummy/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a083c8805b149f1d7d3fedf6073e7c56828eb1666a2d6a69e43f18a8740b674
+ size 1209
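
The three lines above are a Git LFS pointer, not the archive itself; the 1209-byte zip is resolved from the sha256 oid. A minimal sketch of reading such a pointer file when the repo is cloned without LFS smudging (the helper name is illustrative):

def read_lfs_pointer(path):
    # Each pointer line is "<key> <value>": version, oid, size.
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = read_lfs_pointer("dummy/1.0.0/dummy_data.zip")
print(pointer["oid"], pointer["size"])  # sha256:3a083c88... 1209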
newsroom.py ADDED
@@ -0,0 +1,144 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """NEWSROOM Dataset."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import json
+ import os
+
+ import datasets
+
+
+ _CITATION = """
+ @inproceedings{N18-1065,
+ author = {Grusky, Max and Naaman, Mor and Artzi, Yoav},
+ title = {NEWSROOM: A Dataset of 1.3 Million Summaries
+ with Diverse Extractive Strategies},
+ booktitle = {Proceedings of the 2018 Conference of the
+ North American Chapter of the Association for
+ Computational Linguistics: Human Language Technologies},
+ year = {2018},
+ }
+ """
+
+ _DESCRIPTION = """
+ NEWSROOM is a large dataset for training and evaluating summarization systems.
+ It contains 1.3 million articles and summaries written by authors and
+ editors in the newsrooms of 38 major publications.
+
+ Dataset features include:
+ - text: Input news text.
+ - summary: Summary for the news.
+ And additional features:
+ - title: news title.
+ - url: url of the news.
+ - date: date of the article.
+ - density: extractive density.
+ - coverage: extractive coverage.
+ - compression: compression ratio.
+ - density_bin: low, medium, high.
+ - coverage_bin: extractive, abstractive.
+ - compression_bin: low, medium, high.
+
+ This dataset can be downloaded upon request. Unzip all the contents
+ "train.jsonl, dev.jsonl, test.jsonl" to the tfds folder.
+
+ """
+
+ _DOCUMENT = "text"
+ _SUMMARY = "summary"
+ _ADDITIONAL_TEXT_FEATURES = [
+     "title",
+     "url",
+     "date",
+     "density_bin",
+     "coverage_bin",
+     "compression_bin",
+ ]
+ _ADDITIONAL_FLOAT_FEATURES = [
+     "density",
+     "coverage",
+     "compression",
+ ]
+
+
+ class Newsroom(datasets.GeneratorBasedBuilder):
+     """NEWSROOM Dataset."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     @property
+     def manual_download_instructions(self):
+         return """\
+ You should download the dataset from http://lil.datasets.cornell.edu/newsroom/
+ The webpage requires registration.
+ To unzip the .tar file run `tar -zxvf complete.tar`. To unzip the .gz files
+ run `gunzip train.json.gz`, ...
+ After downloading, please put the files under the following names
+ dev.jsonl, test.jsonl and train.jsonl in a dir of your choice,
+ which will be used as a manual_dir, e.g. `~/.manual_dirs/newsroom`
+ Newsroom can then be loaded via:
+ `datasets.load_dataset("newsroom", data_dir="~/.manual_dirs/newsroom")`.
+ """
+
+     def _info(self):
+         features = {k: datasets.Value("string") for k in [_DOCUMENT, _SUMMARY] + _ADDITIONAL_TEXT_FEATURES}
+         features.update({k: datasets.Value("float32") for k in _ADDITIONAL_FLOAT_FEATURES})
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(features),
+             supervised_keys=(_DOCUMENT, _SUMMARY),
+             homepage="http://lil.datasets.cornell.edu/newsroom/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
+         if not os.path.exists(data_dir):
+             raise FileNotFoundError(
+                 "{} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('newsroom', data_dir=...)` that includes the unzipped newsroom files (train.jsonl, dev.jsonl, test.jsonl). Manual download instructions: {}".format(
+                     data_dir, self.manual_download_instructions
+                 )
+             )
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"input_file": os.path.join(data_dir, "train.jsonl")},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"input_file": os.path.join(data_dir, "dev.jsonl")},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"input_file": os.path.join(data_dir, "test.jsonl")},
+             ),
+         ]
+
+     def _generate_examples(self, input_file=None):
+         """Yields examples."""
+         with open(input_file, encoding="utf-8") as f:
+             for i, line in enumerate(f):
+                 d = json.loads(line)
+                 # fields are "url", "archive", "title", "date", "text",
+                 # "compression_bin", "density_bin", "summary", "density",
+                 # "compression", "coverage", "coverage_bin",
+                 yield i, {
+                     k: d[k] for k in [_DOCUMENT, _SUMMARY] + _ADDITIONAL_TEXT_FEATURES + _ADDITIONAL_FLOAT_FEATURES
+                 }
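
Per the manual_download_instructions in the script, loading ends up looking like the sketch below; the data_dir path is only an example and must already contain the manually downloaded train.jsonl, dev.jsonl and test.jsonl:

import datasets

# Minimal usage sketch; the directory is illustrative and must hold the
# manually downloaded train.jsonl, dev.jsonl and test.jsonl files.
newsroom = datasets.load_dataset("newsroom", data_dir="~/.manual_dirs/newsroom")

print(newsroom)  # DatasetDict with train / validation / test splits
first = newsroom["train"][0]
print(first["title"], first["density_bin"], first["compression"])
print(first["summary"])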