Commit a5c4e29 (0 parents), committed by system (HF staff)

Update files from the datasets library (from 1.0.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0
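
For reference, a minimal sketch of loading this dataset once the cnn_dailymail.py script below is in place. The "3.0.0" config name comes from that script; this assumes `datasets` (>= 1.0.0) is installed and the Google Drive / GitHub URLs in its _DL_URLS are still reachable:

# Minimal sketch: load the dataset defined by the cnn_dailymail.py script
# in this commit. Assumes `datasets` (>= 1.0.0) is installed and the
# download URLs in _DL_URLS are still reachable.
import datasets

dataset = datasets.load_dataset("cnn_dailymail", "3.0.0")

example = dataset["train"][0]
print(example["article"][:200])  # document to be summarized
print(example["highlights"])     # target summary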

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
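
Every pattern above routes matching files through Git LFS, so the repository stores small pointer files instead of the binaries themselves (the dummy_data.zip entries at the end of this commit show what such pointers look like). As a rough illustration, here is a sketch that approximates the matching with Python's fnmatch; real gitattributes semantics (e.g. the `saved_model/**/*` rule) are more involved:

# Sketch: approximate which filenames the .gitattributes rules above send
# through Git LFS. fnmatch only approximates gitattributes matching, so
# this is illustrative, not authoritative.
from fnmatch import fnmatch

LFS_PATTERNS = ["*.7z", "*.arrow", "*.bin", "*.gz", "*.zip", "*tfevents*"]  # subset of the rules above

def is_lfs_tracked(filename):
    return any(fnmatch(filename, pattern) for pattern in LFS_PATTERNS)

assert is_lfs_tracked("dummy_data.zip")
assert not is_lfs_tracked("cnn_dailymail.py")  # plain text stays in git
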
cnn_dailymail.py ADDED
@@ -0,0 +1,278 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """CNN/DailyMail Summarization dataset, non-anonymized version."""
+ from __future__ import absolute_import, division, print_function
+
+ import hashlib
+ import logging
+ import os
+
+ import datasets
+
+
+ _DESCRIPTION = """\
+ CNN/DailyMail non-anonymized summarization dataset.
+
+ There are two features:
+   - article: text of news article, used as the document to be summarized
+   - highlights: joined text of highlights with <s> and </s> around each
+     highlight, which is the target summary
+ """
+
+ # The second citation introduces the source data, while the first
+ # introduces the specific form (non-anonymized) we use here.
+ _CITATION = """\
+ @article{DBLP:journals/corr/SeeLM17,
+   author    = {Abigail See and
+                Peter J. Liu and
+                Christopher D. Manning},
+   title     = {Get To The Point: Summarization with Pointer-Generator Networks},
+   journal   = {CoRR},
+   volume    = {abs/1704.04368},
+   year      = {2017},
+   url       = {http://arxiv.org/abs/1704.04368},
+   archivePrefix = {arXiv},
+   eprint    = {1704.04368},
+   timestamp = {Mon, 13 Aug 2018 16:46:08 +0200},
+   biburl    = {https://dblp.org/rec/bib/journals/corr/SeeLM17},
+   bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+
+ @inproceedings{hermann2015teaching,
+   title={Teaching machines to read and comprehend},
+   author={Hermann, Karl Moritz and Kocisky, Tomas and Grefenstette, Edward and Espeholt, Lasse and Kay, Will and Suleyman, Mustafa and Blunsom, Phil},
+   booktitle={Advances in neural information processing systems},
+   pages={1693--1701},
+   year={2015}
+ }
+ """
+
+ _DL_URLS = {
+     # pylint: disable=line-too-long
+     "cnn_stories": "https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfTHk4NFg2SndKcjQ",
+     "dm_stories": "https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs",
+     "test_urls": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_test.txt",
+     "train_urls": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_train.txt",
+     "val_urls": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_val.txt",
+     # pylint: enable=line-too-long
+ }
+
+ _HIGHLIGHTS = "highlights"
+ _ARTICLE = "article"
+
+ _SUPPORTED_VERSIONS = [
+     # Using cased version.
+     datasets.Version("3.0.0", "Using cased version."),
+     # Same data as 0.0.2
+     datasets.Version("1.0.0", ""),
+     # Having the model predict newline separators makes it easier to evaluate
+     # using summary-level ROUGE.
+     datasets.Version("2.0.0", "Separate target sentences with newline."),
+ ]
+
+
+ _DEFAULT_VERSION = datasets.Version("3.0.0", "Using cased version.")
+
+
+ class CnnDailymailConfig(datasets.BuilderConfig):
+     """BuilderConfig for CnnDailymail."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for CnnDailymail.
+
+         Args:
+
+           **kwargs: keyword arguments forwarded to super.
+         """
+         super(CnnDailymailConfig, self).__init__(**kwargs)
+
+
+ def _get_url_hashes(path):
+     """Get hashes of urls in file."""
+     urls = _read_text_file(path)
+
+     def url_hash(u):
+         h = hashlib.sha1()
+         try:
+             u = u.encode("utf-8")
+         except UnicodeEncodeError:  # str.encode() raises UnicodeEncodeError, not UnicodeDecodeError
+             logging.error("Cannot hash url: %s", u)
+         h.update(u)
+         return h.hexdigest()
+
+     return {url_hash(u): True for u in urls}
+
+
+ def _get_hash_from_path(p):
+     """Extract hash from path."""
+     basename = os.path.basename(p)
+     return basename[0 : basename.find(".story")]
+
+
+ def _find_files(dl_paths, publisher, url_dict):
+     """Find files corresponding to urls."""
+     if publisher == "cnn":
+         top_dir = os.path.join(dl_paths["cnn_stories"], "cnn", "stories")
+     elif publisher == "dm":
+         top_dir = os.path.join(dl_paths["dm_stories"], "dailymail", "stories")
+     else:
+         logging.fatal("Unsupported publisher: %s", publisher)
+     files = sorted(os.listdir(top_dir))
+
+     ret_files = []
+     for p in files:
+         if _get_hash_from_path(p) in url_dict:
+             ret_files.append(os.path.join(top_dir, p))
+     return ret_files
+
+
+ def _subset_filenames(dl_paths, split):
+     """Get filenames for a particular split."""
+     assert isinstance(dl_paths, dict), dl_paths
+     # Get filenames for a split.
+     if split == datasets.Split.TRAIN:
+         urls = _get_url_hashes(dl_paths["train_urls"])
+     elif split == datasets.Split.VALIDATION:
+         urls = _get_url_hashes(dl_paths["val_urls"])
+     elif split == datasets.Split.TEST:
+         urls = _get_url_hashes(dl_paths["test_urls"])
+     else:
+         logging.fatal("Unsupported split: %s", split)
+     cnn = _find_files(dl_paths, "cnn", urls)
+     dm = _find_files(dl_paths, "dm", urls)
+     return cnn + dm
+
+
+ DM_SINGLE_CLOSE_QUOTE = "\u2019"  # unicode
+ DM_DOUBLE_CLOSE_QUOTE = "\u201d"
+ # acceptable ways to end a sentence
+ END_TOKENS = [".", "!", "?", "...", "'", "`", '"', DM_SINGLE_CLOSE_QUOTE, DM_DOUBLE_CLOSE_QUOTE, ")"]
+
+
+ def _read_text_file(text_file):
+     lines = []
+     with open(text_file, "r", encoding="utf-8") as f:
+         for line in f:
+             lines.append(line.strip())
+     return lines
+
+
+ def _get_art_abs(story_file, tfds_version):
+     """Get abstract (highlights) and article from a story file path."""
+     # Based on https://github.com/abisee/cnn-dailymail/blob/master/
+     # make_datafiles.py
+
+     lines = _read_text_file(story_file)
+
+     # The GitHub code lowercases the text; that step was removed in 3.0.0.
+
+     # Put periods on the ends of lines that are missing them
+     # (this is a problem in the dataset because many image captions don't end in
+     # periods; consequently they end up in the body of the article as run-on
+     # sentences)
+     def fix_missing_period(line):
+         """Adds a period to a line that is missing a period."""
+         if "@highlight" in line:
+             return line
+         if not line:
+             return line
+         if line[-1] in END_TOKENS:
+             return line
+         return line + " ."
+
+     lines = [fix_missing_period(line) for line in lines]
+
+     # Separate out article and abstract sentences
+     article_lines = []
+     highlights = []
+     next_is_highlight = False
+     for line in lines:
+         if not line:
+             continue  # empty line
+         elif line.startswith("@highlight"):
+             next_is_highlight = True
+         elif next_is_highlight:
+             highlights.append(line)
+         else:
+             article_lines.append(line)
+
+     # Make article into a single string
+     article = " ".join(article_lines)
+
+     if tfds_version >= "2.0.0":
+         abstract = "\n".join(highlights)
+     else:
+         abstract = " ".join(highlights)
+
+     return article, abstract
+
+
+ class CnnDailymail(datasets.GeneratorBasedBuilder):
+     """CNN/DailyMail non-anonymized summarization dataset."""
+
+     BUILDER_CONFIGS = [
+         CnnDailymailConfig(name=str(version), description="Plain text", version=version)
+         for version in _SUPPORTED_VERSIONS
+     ]
+
+     def _info(self):
+         # Should return a datasets.DatasetInfo object
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     _ARTICLE: datasets.Value("string"),
+                     _HIGHLIGHTS: datasets.Value("string"),
+                     "id": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://github.com/abisee/cnn-dailymail",
+             citation=_CITATION,
+         )
+
+     def _vocab_text_gen(self, paths):
+         for _, ex in self._generate_examples(paths):
+             yield " ".join([ex[_ARTICLE], ex[_HIGHLIGHTS]])
+
+     def _split_generators(self, dl_manager):
+         dl_paths = dl_manager.download_and_extract(_DL_URLS)
+         train_files = _subset_filenames(dl_paths, datasets.Split.TRAIN)
+         # Generate shared vocabulary
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": train_files}),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"files": _subset_filenames(dl_paths, datasets.Split.VALIDATION)},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST, gen_kwargs={"files": _subset_filenames(dl_paths, datasets.Split.TEST)}
+             ),
+         ]
+
+     def _generate_examples(self, files):
+         for p in files:
+             article, highlights = _get_art_abs(p, self.config.version)
+             if not article or not highlights:
+                 continue
+             fname = os.path.basename(p)
+             yield fname, {
+                 _ARTICLE: article,
+                 _HIGHLIGHTS: highlights,
+                 "id": _get_hash_from_path(fname),
+             }
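
To make the parsing in `_get_art_abs` concrete: a raw `.story` file is plain text in which each summary sentence is preceded by an `@highlight` marker. A minimal sketch of that split on an invented example (the period-fixing pass is omitted for brevity):

# Sketch: how _get_art_abs splits a .story file into article and highlights.
# The story text is invented for illustration; real files come from the
# CNN/DailyMail downloads above.
story = """The quick brown fox jumped over the lazy dog in a city park on Tuesday .

@highlight

Fox jumps over dog .

@highlight

Incident occurred in a city park ."""

article_lines, highlights, next_is_highlight = [], [], False
for line in (l.strip() for l in story.splitlines()):
    if not line:
        continue  # skip blank lines
    if line.startswith("@highlight"):
        next_is_highlight = True
    elif next_is_highlight:
        highlights.append(line)
    else:
        article_lines.append(line)

print(" ".join(article_lines))  # article body
print("\n".join(highlights))    # newline-joined summary (versions >= 2.0.0)
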
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"3.0.0": {"description": "CNN/DailyMail non-anonymized summarization dataset.\n\nThere are two features:\n - article: text of news article, used as the document to be summarized\n - highlights: joined text of highlights with <s> and </s> around each\n highlight, which is the target summary\n", "citation": "@article{DBLP:journals/corr/SeeLM17,\n author = {Abigail See and\n Peter J. Liu and\n Christopher D. Manning},\n title = {Get To The Point: Summarization with Pointer-Generator Networks},\n journal = {CoRR},\n volume = {abs/1704.04368},\n year = {2017},\n url = {http://arxiv.org/abs/1704.04368},\n archivePrefix = {arXiv},\n eprint = {1704.04368},\n timestamp = {Mon, 13 Aug 2018 16:46:08 +0200},\n biburl = {https://dblp.org/rec/bib/journals/corr/SeeLM17},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n\n@inproceedings{hermann2015teaching,\n title={Teaching machines to read and comprehend},\n author={Hermann, Karl Moritz and Kocisky, Tomas and Grefenstette, Edward and Espeholt, Lasse and Kay, Will and Suleyman, Mustafa and Blunsom, Phil},\n booktitle={Advances in neural information processing systems},\n pages={1693--1701},\n year={2015}\n}\n", "homepage": "https://github.com/abisee/cnn-dailymail", "license": "", "features": {"article": {"dtype": "string", "id": null, "_type": "Value"}, "highlights": {"dtype": "string", "id": null, "_type": "Value"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "cnn_dailymail", "config_name": "3.0.0", "version": {"version_str": "3.0.0", "description": "Using cased version.", "datasets_version_to_prepare": null, "major": 3, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1261704307, "num_examples": 287113, "dataset_name": "cnn_dailymail"}, "validation": {"name": "validation", "num_bytes": 57732436, "num_examples": 13368, "dataset_name": "cnn_dailymail"}, "test": {"name": "test", "num_bytes": 49925756, "num_examples": 11490, "dataset_name": "cnn_dailymail"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfTHk4NFg2SndKcjQ": {"num_bytes": 158577824, "checksum": "e8fbc0027e54e0a916abd9c969eb35f708ed1467d7ef4e3b17a56739d65cb200"}, "https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs": {"num_bytes": 375893739, "checksum": "ad69010002210b7c406718248ee66e65868b9f6820f163aa966369878d14147e"}, "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_test.txt": {"num_bytes": 2109547, "checksum": "c4f5efb5ec2126430a5c156efbd13d0e9c4cb490169e552c38b4a51981a009bd"}, "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_train.txt": {"num_bytes": 46424688, "checksum": "a5cee49f3a6c862c26ce29308236d2a99625ab6c86a43be22d5206b2790d8029"}, "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_val.txt": {"num_bytes": 2433674, "checksum": "81887e982b045083409c6ee838aede8ff4b97291605bcfb21bffc456a16991db"}}, "download_size": 585439472, "dataset_size": 1369362499, "size_in_bytes": 1954801971}, "1.0.0": {"description": "CNN/DailyMail non-anonymized summarization dataset.\n\nThere are two features:\n - article: text of news article, used as the document to be summarized\n - highlights: joined text of highlights with <s> and </s> around each\n highlight, which is the target summary\n", "citation": "@article{DBLP:journals/corr/SeeLM17,\n author = {Abigail See and\n Peter J. Liu and\n Christopher D. Manning},\n title = {Get To The Point: Summarization with Pointer-Generator Networks},\n journal = {CoRR},\n volume = {abs/1704.04368},\n year = {2017},\n url = {http://arxiv.org/abs/1704.04368},\n archivePrefix = {arXiv},\n eprint = {1704.04368},\n timestamp = {Mon, 13 Aug 2018 16:46:08 +0200},\n biburl = {https://dblp.org/rec/bib/journals/corr/SeeLM17},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n\n@inproceedings{hermann2015teaching,\n title={Teaching machines to read and comprehend},\n author={Hermann, Karl Moritz and Kocisky, Tomas and Grefenstette, Edward and Espeholt, Lasse and Kay, Will and Suleyman, Mustafa and Blunsom, Phil},\n booktitle={Advances in neural information processing systems},\n pages={1693--1701},\n year={2015}\n}\n", "homepage": "https://github.com/abisee/cnn-dailymail", "license": "", "features": {"article": {"dtype": "string", "id": null, "_type": "Value"}, "highlights": {"dtype": "string", "id": null, "_type": "Value"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "cnn_dailymail", "config_name": "1.0.0", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1261704307, "num_examples": 287113, "dataset_name": "cnn_dailymail"}, "validation": {"name": "validation", "num_bytes": 57732436, "num_examples": 13368, "dataset_name": "cnn_dailymail"}, "test": {"name": "test", "num_bytes": 49925756, "num_examples": 11490, "dataset_name": "cnn_dailymail"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfTHk4NFg2SndKcjQ": {"num_bytes": 158577824, "checksum": "e8fbc0027e54e0a916abd9c969eb35f708ed1467d7ef4e3b17a56739d65cb200"}, "https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs": {"num_bytes": 375893739, "checksum": "ad69010002210b7c406718248ee66e65868b9f6820f163aa966369878d14147e"}, "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_test.txt": {"num_bytes": 2109547, "checksum": "c4f5efb5ec2126430a5c156efbd13d0e9c4cb490169e552c38b4a51981a009bd"}, "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_train.txt": {"num_bytes": 46424688, "checksum": "a5cee49f3a6c862c26ce29308236d2a99625ab6c86a43be22d5206b2790d8029"}, "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_val.txt": {"num_bytes": 2433674, "checksum": "81887e982b045083409c6ee838aede8ff4b97291605bcfb21bffc456a16991db"}}, "download_size": 585439472, "dataset_size": 1369362499, "size_in_bytes": 1954801971}, "2.0.0": {"description": "CNN/DailyMail non-anonymized summarization dataset.\n\nThere are two features:\n - article: text of news article, used as the document to be summarized\n - highlights: joined text of highlights with <s> and </s> around each\n highlight, which is the target summary\n", "citation": "@article{DBLP:journals/corr/SeeLM17,\n author = {Abigail See and\n Peter J. Liu and\n Christopher D. Manning},\n title = {Get To The Point: Summarization with Pointer-Generator Networks},\n journal = {CoRR},\n volume = {abs/1704.04368},\n year = {2017},\n url = {http://arxiv.org/abs/1704.04368},\n archivePrefix = {arXiv},\n eprint = {1704.04368},\n timestamp = {Mon, 13 Aug 2018 16:46:08 +0200},\n biburl = {https://dblp.org/rec/bib/journals/corr/SeeLM17},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n\n@inproceedings{hermann2015teaching,\n title={Teaching machines to read and comprehend},\n author={Hermann, Karl Moritz and Kocisky, Tomas and Grefenstette, Edward and Espeholt, Lasse and Kay, Will and Suleyman, Mustafa and Blunsom, Phil},\n booktitle={Advances in neural information processing systems},\n pages={1693--1701},\n year={2015}\n}\n", "homepage": "https://github.com/abisee/cnn-dailymail", "license": "", "features": {"article": {"dtype": "string", "id": null, "_type": "Value"}, "highlights": {"dtype": "string", "id": null, "_type": "Value"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "cnn_dailymail", "config_name": "2.0.0", "version": {"version_str": "2.0.0", "description": "Separate target sentences with newline.", "datasets_version_to_prepare": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1261704307, "num_examples": 287113, "dataset_name": "cnn_dailymail"}, "validation": {"name": "validation", "num_bytes": 57732436, "num_examples": 13368, "dataset_name": "cnn_dailymail"}, "test": {"name": "test", "num_bytes": 49925756, "num_examples": 11490, "dataset_name": "cnn_dailymail"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfTHk4NFg2SndKcjQ": {"num_bytes": 158577824, "checksum": "e8fbc0027e54e0a916abd9c969eb35f708ed1467d7ef4e3b17a56739d65cb200"}, "https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs": {"num_bytes": 375893739, "checksum": "ad69010002210b7c406718248ee66e65868b9f6820f163aa966369878d14147e"}, "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_test.txt": {"num_bytes": 2109547, "checksum": "c4f5efb5ec2126430a5c156efbd13d0e9c4cb490169e552c38b4a51981a009bd"}, "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_train.txt": {"num_bytes": 46424688, "checksum": "a5cee49f3a6c862c26ce29308236d2a99625ab6c86a43be22d5206b2790d8029"}, "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_val.txt": {"num_bytes": 2433674, "checksum": "81887e982b045083409c6ee838aede8ff4b97291605bcfb21bffc456a16991db"}}, "download_size": 585439472, "dataset_size": 1369362499, "size_in_bytes": 1954801971}}
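
The blob above records, for each of the three configs, the description, citation, feature schema, split sizes, and download checksums. A small sketch of reading the split sizes back out (path assumed relative to this repository's root):

# Sketch: print the per-split example counts recorded in dataset_infos.json.
import json

with open("dataset_infos.json", encoding="utf-8") as f:
    infos = json.load(f)

for split_name, split_info in infos["3.0.0"]["splits"].items():
    print(split_name, split_info["num_examples"])
# From the blob above: train 287113, validation 13368, test 11490
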
dummy/1.0.0/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b259e21b04456559a069e30fc63c586a179532a7c330addce0fc657a88c00c4
+ size 4091
dummy/2.0.0/2.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:206bd4640f2396b16952b100dd221d8d4e69189e132bc9c386b6f1bac305411d
+ size 4091
dummy/3.0.0/3.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ab30e38a0067d186151996943cd5675de4b36a58aed972d102e84515a0e6a4e
+ size 7079
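
Each dummy_data.zip entry above is a Git LFS pointer: the repository stores only the spec version, a sha256 object id, and the byte size in place of the zip itself. A minimal sketch of parsing one of these pointers (text copied from the 1.0.0 entry):

# Sketch: parse a Git LFS pointer file into its three fields.
pointer_text = """version https://git-lfs.github.com/spec/v1
oid sha256:5b259e21b04456559a069e30fc63c586a179532a7c330addce0fc657a88c00c4
size 4091"""

pointer = dict(line.split(" ", 1) for line in pointer_text.splitlines())
print(pointer["version"])  # LFS spec URL
print(pointer["oid"])      # sha256 of the real zip content
print(pointer["size"])     # "4091" bytes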