albertvillanova (HF staff) committed
Commit
898abee
1 Parent(s): 3adf624

Delete loading script

Files changed (1)
  1. cnn_dailymail.py +0 -250
cnn_dailymail.py DELETED
@@ -1,250 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """CNN/DailyMail Summarization dataset, non-anonymized version."""
-
- import hashlib
- import os
-
- import datasets
-
-
- logger = datasets.logging.get_logger(__name__)
-
-
- _HOMEPAGE = "https://github.com/abisee/cnn-dailymail"
-
- _DESCRIPTION = """\
- CNN/DailyMail non-anonymized summarization dataset.
-
- There are two features:
-   - article: text of news article, used as the document to be summarized
-   - highlights: joined text of highlights with <s> and </s> around each
-     highlight, which is the target summary
- """
-
- # The second citation introduces the source data, while the first
- # introduces the specific form (non-anonymized) we use here.
- _CITATION = """\
- @article{DBLP:journals/corr/SeeLM17,
-   author = {Abigail See and
-             Peter J. Liu and
-             Christopher D. Manning},
-   title = {Get To The Point: Summarization with Pointer-Generator Networks},
-   journal = {CoRR},
-   volume = {abs/1704.04368},
-   year = {2017},
-   url = {http://arxiv.org/abs/1704.04368},
-   archivePrefix = {arXiv},
-   eprint = {1704.04368},
-   timestamp = {Mon, 13 Aug 2018 16:46:08 +0200},
-   biburl = {https://dblp.org/rec/bib/journals/corr/SeeLM17},
-   bibsource = {dblp computer science bibliography, https://dblp.org}
- }
-
- @inproceedings{hermann2015teaching,
-   title={Teaching machines to read and comprehend},
-   author={Hermann, Karl Moritz and Kocisky, Tomas and Grefenstette, Edward and Espeholt, Lasse and Kay, Will and Suleyman, Mustafa and Blunsom, Phil},
-   booktitle={Advances in neural information processing systems},
-   pages={1693--1701},
-   year={2015}
- }
- """
-
- _DL_URLS = {
-     "cnn_stories": "https://huggingface.co/datasets/cnn_dailymail/resolve/11343c3752184397d56efc19a8a7cceb68089318/data/cnn_stories.tgz",
-     "dm_stories": "https://huggingface.co/datasets/cnn_dailymail/resolve/11343c3752184397d56efc19a8a7cceb68089318/data/dailymail_stories.tgz",
-     "train": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_train.txt",
-     "validation": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_val.txt",
-     "test": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_test.txt",
- }
-
- _HIGHLIGHTS = "highlights"
- _ARTICLE = "article"
-
- _SUPPORTED_VERSIONS = [
-     # Using cased version.
-     datasets.Version("3.0.0", "Using cased version."),
-     # Same data as 0.0.2
-     datasets.Version("1.0.0", ""),
-     # Having the model predict newline separators makes it easier to evaluate
-     # using summary-level ROUGE.
-     datasets.Version("2.0.0", "Separate target sentences with newline."),
- ]
-
-
- _DEFAULT_VERSION = datasets.Version("3.0.0", "Using cased version.")
-
-
- class CnnDailymailConfig(datasets.BuilderConfig):
-     """BuilderConfig for CnnDailymail."""
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for CnnDailymail.
-
-         Args:
-
-           **kwargs: keyword arguments forwarded to super.
-         """
-         super(CnnDailymailConfig, self).__init__(**kwargs)
-
-
- def _get_url_hashes(path):
-     """Get hashes of urls in file."""
-     urls = _read_text_file_path(path)
-
-     def url_hash(u):
-         h = hashlib.sha1()
-         try:
-             u = u.encode("utf-8")
-         except UnicodeDecodeError:
-             logger.error("Cannot hash url: %s", u)
-         h.update(u)
-         return h.hexdigest()
-
-     return {url_hash(u) for u in urls}
-
-
- def _get_hash_from_path(p):
-     """Extract hash from path."""
-     return os.path.splitext(os.path.basename(p))[0]
-
-
- DM_SINGLE_CLOSE_QUOTE = "\u2019"  # unicode
- DM_DOUBLE_CLOSE_QUOTE = "\u201d"
- # acceptable ways to end a sentence
- END_TOKENS = [".", "!", "?", "...", "'", "`", '"', DM_SINGLE_CLOSE_QUOTE, DM_DOUBLE_CLOSE_QUOTE, ")"]
-
-
- def _read_text_file_path(path):
-     with open(path, "r", encoding="utf-8") as f:
-         lines = [line.strip() for line in f]
-     return lines
-
-
- def _read_text_file(file):
-     return [line.decode("utf-8").strip() for line in file]
-
-
- def _get_art_abs(story_file, tfds_version):
-     """Get abstract (highlights) and article from a story file path."""
-     # Based on https://github.com/abisee/cnn-dailymail/blob/master/
-     # make_datafiles.py
-
-     lines = _read_text_file(story_file)
-
-     # The github code lowercase the text and we removed it in 3.0.0.
-
-     # Put periods on the ends of lines that are missing them
-     # (this is a problem in the dataset because many image captions don't end in
-     # periods; consequently they end up in the body of the article as run-on
-     # sentences)
-     def fix_missing_period(line):
-         """Adds a period to a line that is missing a period."""
-         if "@highlight" in line:
-             return line
-         if not line:
-             return line
-         if line[-1] in END_TOKENS:
-             return line
-         return line + " ."
-
-     lines = [fix_missing_period(line) for line in lines]
-
-     # Separate out article and abstract sentences
-     article_lines = []
-     highlights = []
-     next_is_highlight = False
-     for line in lines:
-         if not line:
-             continue  # empty line
-         elif line.startswith("@highlight"):
-             next_is_highlight = True
-         elif next_is_highlight:
-             highlights.append(line)
-         else:
-             article_lines.append(line)
-
-     # Make article into a single string
-     article = " ".join(article_lines)
-
-     if tfds_version >= "2.0.0":
-         abstract = "\n".join(highlights)
-     else:
-         abstract = " ".join(highlights)
-
-     return article, abstract
-
-
- class CnnDailymail(datasets.GeneratorBasedBuilder):
-     """CNN/DailyMail non-anonymized summarization dataset."""
-
-     BUILDER_CONFIGS = [
-         CnnDailymailConfig(name=str(version), description="Plain text", version=version)
-         for version in _SUPPORTED_VERSIONS
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     _ARTICLE: datasets.Value("string"),
-                     _HIGHLIGHTS: datasets.Value("string"),
-                     "id": datasets.Value("string"),
-                 }
-             ),
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-         )
-
-     def _vocab_text_gen(self, paths):
-         for _, ex in self._generate_examples(paths):
-             yield " ".join([ex[_ARTICLE], ex[_HIGHLIGHTS]])
-
-     def _split_generators(self, dl_manager):
-         dl_paths = dl_manager.download(_DL_URLS)
-         return [
-             datasets.SplitGenerator(
-                 name=split,
-                 gen_kwargs={
-                     "urls_file": dl_paths[split],
-                     "files_per_archive": [
-                         dl_manager.iter_archive(dl_paths["cnn_stories"]),
-                         dl_manager.iter_archive(dl_paths["dm_stories"]),
-                     ],
-                 },
-             )
-             for split in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]
-         ]
-
-     def _generate_examples(self, urls_file, files_per_archive):
-         urls = _get_url_hashes(urls_file)
-         idx = 0
-         for files in files_per_archive:
-             for path, file in files:
-                 hash_from_path = _get_hash_from_path(path)
-                 if hash_from_path in urls:
-                     article, highlights = _get_art_abs(file, self.config.version)
-                     if not article or not highlights:
-                         continue
-                     yield idx, {
-                         _ARTICLE: article,
-                         _HIGHLIGHTS: highlights,
-                         "id": hash_from_path,
-                     }
-                     idx += 1
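With the loading script deleted, the dataset is presumably served from data files hosted directly in the repository (the usual pattern for these conversions), so loading no longer executes any of the code above. A minimal sketch with the `datasets` library, assuming the standard `cnn_dailymail` repo id and the `3.0.0` configuration defined in the deleted script (cased text, newline-separated highlights):

import hashlib

from datasets import load_dataset

# Load the cased 3.0.0 config; the columns match the features the deleted
# script declared in _info(): "article", "highlights" and "id".
dataset = load_dataset("cnn_dailymail", "3.0.0")

sample = dataset["train"][0]
print(sample["article"][:200])  # news article, the document to be summarized
print(sample["highlights"])     # target summary; one highlight per line in 2.0.0+
print(sample["id"])             # per the script: SHA-1 hex digest of the source story URL

# The deleted script matched .story files to splits by SHA-1 hashing each URL
# in the split's url_list; that same digest is what ends up in the "id" column.
def story_id(url):
    return hashlib.sha1(url.encode("utf-8")).hexdigest()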