system (HF staff) committed
Commit cda863e · 0 parents

Update files from the datasets library (from 1.0.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
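
These rules route every matching file (archives, model weights, Arrow/Parquet data, and so on) through Git LFS instead of storing the raw bytes in git history. A rough Python sketch, using fnmatch as a stand-in for gitattributes glob matching (real Git applies its own wildmatch rules, and the saved_model/**/* globstar is not modelled here), to check which files in this commit would be LFS-tracked:

    from fnmatch import fnmatchcase

    # A few of the patterns tracked above; "saved_model/**/*" is omitted because
    # fnmatch has no gitattributes-style "**" globstar.
    lfs_patterns = ["*.zip", "*.arrow", "*.parquet", "*.bin", "*.tar.*"]

    files_in_commit = [
        ".gitattributes",
        "dataset_infos.json",
        "dummy/1.0.0/dummy_data.zip",
        "multi_news.py",
    ]

    for path in files_in_commit:
        tracked = any(fnmatchcase(path, pattern) for pattern in lfs_patterns)
        print(f"{path}: {'stored via LFS' if tracked else 'stored as a regular git blob'}")

Only dummy/1.0.0/dummy_data.zip matches (via *.zip), which is why it appears further down as an LFS pointer file rather than as raw zip bytes.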
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "\nMulti-News, consists of news articles and human-written summaries\nof these articles from the site newser.com.\nEach summary is professionally written by editors and\nincludes links to the original articles cited.\n\nThere are two features:\n - document: text of news articles seperated by special token \"|||||\".\n - summary: news summary.\n", "citation": "\n@misc{alex2019multinews,\n title={Multi-News: a Large-Scale Multi-Document Summarization Dataset and Abstractive Hierarchical Model},\n author={Alexander R. Fabbri and Irene Li and Tianwei She and Suyi Li and Dragomir R. Radev},\n year={2019},\n eprint={1906.01749},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://github.com/Alex-Fabbri/Multi-News", "license": "", "features": {"document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": {"input": "document", "output": "summary"}, "builder_name": "multi_news", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 70391756, "num_examples": 5622, "dataset_name": "multi_news"}, "train": {"name": "train", "num_bytes": 561166101, "num_examples": 44972, "dataset_name": "multi_news"}, "validation": {"name": "validation", "num_bytes": 68619448, "num_examples": 5622, "dataset_name": "multi_news"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1vRY2wM6rlOZrf9exGTm5pXj5ExlVwJ0C": {"num_bytes": 256966232, "checksum": "64ae4d2483b248c9664b50bacfab6821f8a3e93f382c7587686fa4a127f77626"}}, "download_size": 256966232, "dataset_size": 700177305, "size_in_bytes": 957143537}}
dummy/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1006c08da22ccb16f449245f20bee7ddceb776a3746463e675d1f7b567f55344
+ size 1770
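
Because *.zip is LFS-tracked by the .gitattributes above, the dummy-data archive is committed as this small pointer file; the actual 1,770-byte zip lives in LFS storage and is addressed by its SHA-256 oid. A small sketch of how such a pointer's fields could be read, using the pointer text shown above as input:

    def parse_lfs_pointer(text):
        """Parse a Git LFS pointer file into a dict of its key/value fields."""
        fields = {}
        for line in text.strip().splitlines():
            key, _, value = line.partition(" ")
            fields[key] = value
        return fields

    pointer = (
        "version https://git-lfs.github.com/spec/v1\n"
        "oid sha256:1006c08da22ccb16f449245f20bee7ddceb776a3746463e675d1f7b567f55344\n"
        "size 1770\n"
    )
    info = parse_lfs_pointer(pointer)
    print(info["oid"])   # sha256:1006c08...
    print(info["size"])  # 1770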
multi_news.py ADDED
@@ -0,0 +1,99 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """Multi-News dataset."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import os
+
+ import datasets
+
+
+ _CITATION = """
+ @misc{alex2019multinews,
+     title={Multi-News: a Large-Scale Multi-Document Summarization Dataset and Abstractive Hierarchical Model},
+     author={Alexander R. Fabbri and Irene Li and Tianwei She and Suyi Li and Dragomir R. Radev},
+     year={2019},
+     eprint={1906.01749},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ """
+
+ _DESCRIPTION = """
+ Multi-News consists of news articles and human-written summaries
+ of these articles from the site newser.com.
+ Each summary is professionally written by editors and
+ includes links to the original articles cited.
+
+ There are two features:
+   - document: text of news articles separated by special token "|||||".
+   - summary: news summary.
+ """
+
+ _URL = "https://drive.google.com/uc?export=download&id=1vRY2wM6rlOZrf9exGTm5pXj5ExlVwJ0C"
+
+ _DOCUMENT = "document"
+ _SUMMARY = "summary"
+
+
+ class MultiNews(datasets.GeneratorBasedBuilder):
+     """Multi-News dataset."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features({_DOCUMENT: datasets.Value("string"), _SUMMARY: datasets.Value("string")}),
+             supervised_keys=(_DOCUMENT, _SUMMARY),
+             homepage="https://github.com/Alex-Fabbri/Multi-News",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         extract_path = os.path.join(dl_manager.download_and_extract(_URL), "multi-news-original")
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"path": os.path.join(extract_path, "train")},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"path": os.path.join(extract_path, "val")},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"path": os.path.join(extract_path, "test")},
+             ),
+         ]
+
+     def _generate_examples(self, path=None):
+         """Yields examples."""
+         with open(os.path.join(path + ".src"), encoding="utf-8") as src_f, open(
+             os.path.join(path + ".tgt"), encoding="utf-8"
+         ) as tgt_f:
+             for i, (src_line, tgt_line) in enumerate(zip(src_f, tgt_f)):
+                 yield i, {
+                     # In the original files each line is one example, with natural
+                     # newlines replaced by the token "NEWLINE_CHAR". Restore real
+                     # newlines here so "NEWLINE_CHAR" never enters the vocabulary.
+                     _DOCUMENT: src_line.strip().replace("NEWLINE_CHAR", "\n"),
+                     # Remove the leading "- " token from every target summary.
+                     _SUMMARY: tgt_line.strip().lstrip("- "),
+                 }
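
As the description notes, each document field concatenates several source articles with the "|||||" separator, while _generate_examples has already restored real newlines and stripped the leading "- " from each summary. A small sketch, using a made-up record rather than real dataset text, of how a loaded example might be split back into its individual articles:

    SEPARATOR = "|||||"

    # Hypothetical record shaped like the examples _generate_examples yields.
    record = {
        "document": "First article text.\nSecond paragraph. ||||| Second article text.",
        "summary": "A short human-written summary of both articles.",
    }

    articles = [part.strip() for part in record["document"].split(SEPARATOR) if part.strip()]
    print(len(articles))    # 2
    for article in articles:
        print(article[:60])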