albertvillanova (HF staff) committed
Commit 96df5e6
1 Parent(s): fb656b8

Convert dataset to Parquet (#7)


- Convert dataset to Parquet (600f7a8087d9ba5267b035b3321fdd8d4ce408b8)
- Add 2.0.0 data files (b88a802b43dd71ecbc064e8103d2f8109b18d028)
- Add 3.0.0 data files (3adf6249f0cc8409a97a4d38471529ef5f7dc496)
- Delete loading script (898abee5394c5314a1d43f5c954745d0b891e8f1)
- Delete data folder (53e0d9e2320e7d11cc4b737b28db3af02bfb3bc3)
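
With the data now stored as Parquet shards under per-config directories (1.0.0/, 2.0.0/, 3.0.0/), the dataset loads without running a loading script. A minimal sketch of loading the converted dataset with the `datasets` library; the config names and splits are the ones declared in the README diff below:

```python
from datasets import load_dataset

# Load the Parquet-backed 3.0.0 config; "1.0.0" and "2.0.0" work the same way.
ds = load_dataset("cnn_dailymail", "3.0.0", split="validation")

print(ds)                   # features: article, highlights, id
print(ds[0]["highlights"])  # target summary of the first example
```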

data/cnn_stories.tgz → 1.0.0/test-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e8fbc0027e54e0a916abd9c969eb35f708ed1467d7ef4e3b17a56739d65cb200
- size 158577824
+ oid sha256:c039e9a26d4a7f71ab2ae18ff31a8d31b04744c41ef47086f1b31e45512c342f
+ size 29994057
data/dailymail_stories.tgz → 1.0.0/train-00000-of-00003.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ad69010002210b7c406718248ee66e65868b9f6820f163aa966369878d14147e
- size 375893739
+ oid sha256:dc4f05d20a89fd60c7e9111f8a994cef4408b3ae9c3e128e0f7b758bdfda8498
+ size 256494215
1.0.0/train-00001-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6a7d2f57bc50c8fa6b13882f7b4bab12e8192cfe38c47a3298cfcf58daf4dbb
+ size 256538980
1.0.0/train-00002-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:416f3febff0552c958a0c526f6fdcd742f9ca44820b7dedd3c1fb309424e51ef
+ size 259242781
1.0.0/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eacc3db644938c8d5c2bf744e645c88da736079e386a41c1de9f57809dece894
+ size 34657215
2.0.0/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04e322d2634a96dba76bf9a6294fbbe48e0b36abeae43f13d86ba2c3bebffe4e
+ size 30000471
2.0.0/train-00000-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a56093836f6283ceac48128515f6c87ade70abb11aec31d23a176c4d90ae1736
+ size 256540614
2.0.0/train-00001-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7549c4f8f43c15750f4103aa3ec2966be1f3a51a990e60d97e3a1bac79f8802e
+ size 256588241
2.0.0/train-00002-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef3b5a190df25ac99592b802f070e244251553b78a5157a0c88fa19d8ef1a13b
+ size 259300551
2.0.0/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65a5ccce932b08f050114ce6e3c39355d563fa24f194052bb2f27d1c6c499c91
+ size 34664725
3.0.0/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04e322d2634a96dba76bf9a6294fbbe48e0b36abeae43f13d86ba2c3bebffe4e
+ size 30000471
3.0.0/train-00000-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a56093836f6283ceac48128515f6c87ade70abb11aec31d23a176c4d90ae1736
+ size 256540614
3.0.0/train-00001-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7549c4f8f43c15750f4103aa3ec2966be1f3a51a990e60d97e3a1bac79f8802e
+ size 256588241
3.0.0/train-00002-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef3b5a190df25ac99592b802f070e244251553b78a5157a0c88fa19d8ef1a13b
+ size 259300551
3.0.0/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65a5ccce932b08f050114ce6e3c39355d563fa24f194052bb2f27d1c6c499c91
+ size 34664725
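
Each entry above is a Git LFS pointer rather than the Parquet data itself: `oid sha256:` is the checksum of the actual shard and `size` is its byte count. A small sketch for verifying a shard against its pointer, assuming the file has already been downloaded to a local path (the filename below is just the 1.0.0 test shard used as an example):

```python
import hashlib

def verify_lfs_pointer(local_path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded file against the sha256 oid and size from its LFS pointer."""
    digest = hashlib.sha256()
    size = 0
    with open(local_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# Values copied from the 1.0.0/test-00000-of-00001.parquet pointer above.
print(verify_lfs_pointer(
    "test-00000-of-00001.parquet",
    "c039e9a26d4a7f71ab2ae18ff31a8d31b04744c41ef47086f1b31e45512c342f",
    29994057,
))
```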
README.md CHANGED
@@ -19,17 +19,8 @@ task_ids:
  - news-articles-summarization
  paperswithcode_id: cnn-daily-mail-1
  pretty_name: CNN / Daily Mail
- train-eval-index:
- - config: 3.0.0
-   task: summarization
-   task_id: summarization
-   splits:
-     eval_split: test
-   col_mapping:
-     article: text
-     highlights: target
  dataset_info:
- - config_name: 3.0.0
+ - config_name: 1.0.0
    features:
    - name: article
      dtype: string
@@ -39,17 +30,17 @@ dataset_info:
      dtype: string
    splits:
    - name: train
-     num_bytes: 1261704133
+     num_bytes: 1261703785
      num_examples: 287113
    - name: validation
-     num_bytes: 57732436
+     num_bytes: 57732412
      num_examples: 13368
    - name: test
-     num_bytes: 49925756
+     num_bytes: 49925732
      num_examples: 11490
-   download_size: 585439472
-   dataset_size: 1369362325
- - config_name: 1.0.0
+   download_size: 836927248
+   dataset_size: 1369361929
+ - config_name: 2.0.0
    features:
    - name: article
      dtype: string
@@ -59,17 +50,17 @@ dataset_info:
      dtype: string
    splits:
    - name: train
-     num_bytes: 1261704133
+     num_bytes: 1261703785
      num_examples: 287113
    - name: validation
-     num_bytes: 57732436
+     num_bytes: 57732412
      num_examples: 13368
    - name: test
-     num_bytes: 49925756
+     num_bytes: 49925732
      num_examples: 11490
-   download_size: 585439472
-   dataset_size: 1369362325
- - config_name: 2.0.0
+   download_size: 837094602
+   dataset_size: 1369361929
+ - config_name: 3.0.0
    features:
    - name: article
      dtype: string
@@ -79,16 +70,50 @@ dataset_info:
      dtype: string
    splits:
    - name: train
-     num_bytes: 1261704133
+     num_bytes: 1261703785
      num_examples: 287113
    - name: validation
-     num_bytes: 57732436
+     num_bytes: 57732412
      num_examples: 13368
    - name: test
-     num_bytes: 49925756
+     num_bytes: 49925732
      num_examples: 11490
-   download_size: 585439472
-   dataset_size: 1369362325
+   download_size: 837094602
+   dataset_size: 1369361929
+ configs:
+ - config_name: 1.0.0
+   data_files:
+   - split: train
+     path: 1.0.0/train-*
+   - split: validation
+     path: 1.0.0/validation-*
+   - split: test
+     path: 1.0.0/test-*
+ - config_name: 2.0.0
+   data_files:
+   - split: train
+     path: 2.0.0/train-*
+   - split: validation
+     path: 2.0.0/validation-*
+   - split: test
+     path: 2.0.0/test-*
+ - config_name: 3.0.0
+   data_files:
+   - split: train
+     path: 3.0.0/train-*
+   - split: validation
+     path: 3.0.0/validation-*
+   - split: test
+     path: 3.0.0/test-*
+ train-eval-index:
+ - config: 3.0.0
+   task: summarization
+   task_id: summarization
+   splits:
+     eval_split: test
+   col_mapping:
+     article: text
+     highlights: target
  ---
  # Dataset Card for CNN Dailymail Dataset

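The new `configs` section maps each split to explicit Parquet shard paths inside the repository, so individual shards can also be fetched and read directly. A sketch assuming `huggingface_hub` and `pandas` (with `pyarrow`) are installed; the filename is the 3.0.0 test shard added in this commit:

```python
import pandas as pd
from huggingface_hub import hf_hub_download

# Download one shard listed in the README's data_files mapping ...
path = hf_hub_download(
    repo_id="cnn_dailymail",
    filename="3.0.0/test-00000-of-00001.parquet",
    repo_type="dataset",
)

# ... and read it as a plain Parquet file.
df = pd.read_parquet(path)
print(df.columns.tolist())  # expected: ['article', 'highlights', 'id']
print(len(df))              # 11490 test examples per dataset_info
```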
cnn_dailymail.py DELETED
@@ -1,250 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """CNN/DailyMail Summarization dataset, non-anonymized version."""
-
- import hashlib
- import os
-
- import datasets
-
-
- logger = datasets.logging.get_logger(__name__)
-
-
- _HOMEPAGE = "https://github.com/abisee/cnn-dailymail"
-
- _DESCRIPTION = """\
- CNN/DailyMail non-anonymized summarization dataset.
-
- There are two features:
-   - article: text of news article, used as the document to be summarized
-   - highlights: joined text of highlights with <s> and </s> around each
-     highlight, which is the target summary
- """
-
- # The second citation introduces the source data, while the first
- # introduces the specific form (non-anonymized) we use here.
- _CITATION = """\
- @article{DBLP:journals/corr/SeeLM17,
-   author    = {Abigail See and
-                Peter J. Liu and
-                Christopher D. Manning},
-   title     = {Get To The Point: Summarization with Pointer-Generator Networks},
-   journal   = {CoRR},
-   volume    = {abs/1704.04368},
-   year      = {2017},
-   url       = {http://arxiv.org/abs/1704.04368},
-   archivePrefix = {arXiv},
-   eprint    = {1704.04368},
-   timestamp = {Mon, 13 Aug 2018 16:46:08 +0200},
-   biburl    = {https://dblp.org/rec/bib/journals/corr/SeeLM17},
-   bibsource = {dblp computer science bibliography, https://dblp.org}
- }
-
- @inproceedings{hermann2015teaching,
-   title={Teaching machines to read and comprehend},
-   author={Hermann, Karl Moritz and Kocisky, Tomas and Grefenstette, Edward and Espeholt, Lasse and Kay, Will and Suleyman, Mustafa and Blunsom, Phil},
-   booktitle={Advances in neural information processing systems},
-   pages={1693--1701},
-   year={2015}
- }
- """
-
- _DL_URLS = {
-     "cnn_stories": "https://huggingface.co/datasets/cnn_dailymail/resolve/11343c3752184397d56efc19a8a7cceb68089318/data/cnn_stories.tgz",
-     "dm_stories": "https://huggingface.co/datasets/cnn_dailymail/resolve/11343c3752184397d56efc19a8a7cceb68089318/data/dailymail_stories.tgz",
-     "train": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_train.txt",
-     "validation": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_val.txt",
-     "test": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_test.txt",
- }
-
- _HIGHLIGHTS = "highlights"
- _ARTICLE = "article"
-
- _SUPPORTED_VERSIONS = [
-     # Using cased version.
-     datasets.Version("3.0.0", "Using cased version."),
-     # Same data as 0.0.2
-     datasets.Version("1.0.0", ""),
-     # Having the model predict newline separators makes it easier to evaluate
-     # using summary-level ROUGE.
-     datasets.Version("2.0.0", "Separate target sentences with newline."),
- ]
-
-
- _DEFAULT_VERSION = datasets.Version("3.0.0", "Using cased version.")
-
-
- class CnnDailymailConfig(datasets.BuilderConfig):
-     """BuilderConfig for CnnDailymail."""
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for CnnDailymail.
-
-         Args:
-
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(CnnDailymailConfig, self).__init__(**kwargs)
-
-
- def _get_url_hashes(path):
-     """Get hashes of urls in file."""
-     urls = _read_text_file_path(path)
-
-     def url_hash(u):
-         h = hashlib.sha1()
-         try:
-             u = u.encode("utf-8")
-         except UnicodeDecodeError:
-             logger.error("Cannot hash url: %s", u)
-         h.update(u)
-         return h.hexdigest()
-
-     return {url_hash(u) for u in urls}
-
-
- def _get_hash_from_path(p):
-     """Extract hash from path."""
-     return os.path.splitext(os.path.basename(p))[0]
-
-
- DM_SINGLE_CLOSE_QUOTE = "\u2019"  # unicode
- DM_DOUBLE_CLOSE_QUOTE = "\u201d"
- # acceptable ways to end a sentence
- END_TOKENS = [".", "!", "?", "...", "'", "`", '"', DM_SINGLE_CLOSE_QUOTE, DM_DOUBLE_CLOSE_QUOTE, ")"]
-
-
- def _read_text_file_path(path):
-     with open(path, "r", encoding="utf-8") as f:
-         lines = [line.strip() for line in f]
-     return lines
-
-
- def _read_text_file(file):
-     return [line.decode("utf-8").strip() for line in file]
-
-
- def _get_art_abs(story_file, tfds_version):
-     """Get abstract (highlights) and article from a story file path."""
-     # Based on https://github.com/abisee/cnn-dailymail/blob/master/
-     #     make_datafiles.py
-
-     lines = _read_text_file(story_file)
-
-     # The github code lowercase the text and we removed it in 3.0.0.
-
-     # Put periods on the ends of lines that are missing them
-     # (this is a problem in the dataset because many image captions don't end in
-     # periods; consequently they end up in the body of the article as run-on
-     # sentences)
-     def fix_missing_period(line):
-         """Adds a period to a line that is missing a period."""
-         if "@highlight" in line:
-             return line
-         if not line:
-             return line
-         if line[-1] in END_TOKENS:
-             return line
-         return line + " ."
-
-     lines = [fix_missing_period(line) for line in lines]
-
-     # Separate out article and abstract sentences
-     article_lines = []
-     highlights = []
-     next_is_highlight = False
-     for line in lines:
-         if not line:
-             continue  # empty line
-         elif line.startswith("@highlight"):
-             next_is_highlight = True
-         elif next_is_highlight:
-             highlights.append(line)
-         else:
-             article_lines.append(line)
-
-     # Make article into a single string
-     article = " ".join(article_lines)
-
-     if tfds_version >= "2.0.0":
-         abstract = "\n".join(highlights)
-     else:
-         abstract = " ".join(highlights)
-
-     return article, abstract
-
-
- class CnnDailymail(datasets.GeneratorBasedBuilder):
-     """CNN/DailyMail non-anonymized summarization dataset."""
-
-     BUILDER_CONFIGS = [
-         CnnDailymailConfig(name=str(version), description="Plain text", version=version)
-         for version in _SUPPORTED_VERSIONS
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     _ARTICLE: datasets.Value("string"),
-                     _HIGHLIGHTS: datasets.Value("string"),
-                     "id": datasets.Value("string"),
-                 }
-             ),
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-         )
-
-     def _vocab_text_gen(self, paths):
-         for _, ex in self._generate_examples(paths):
-             yield " ".join([ex[_ARTICLE], ex[_HIGHLIGHTS]])
-
-     def _split_generators(self, dl_manager):
-         dl_paths = dl_manager.download(_DL_URLS)
-         return [
-             datasets.SplitGenerator(
-                 name=split,
-                 gen_kwargs={
-                     "urls_file": dl_paths[split],
-                     "files_per_archive": [
-                         dl_manager.iter_archive(dl_paths["cnn_stories"]),
-                         dl_manager.iter_archive(dl_paths["dm_stories"]),
-                     ],
-                 },
-             )
-             for split in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]
-         ]
-
-     def _generate_examples(self, urls_file, files_per_archive):
-         urls = _get_url_hashes(urls_file)
-         idx = 0
-         for files in files_per_archive:
-             for path, file in files:
-                 hash_from_path = _get_hash_from_path(path)
-                 if hash_from_path in urls:
-                     article, highlights = _get_art_abs(file, self.config.version)
-                     if not article or not highlights:
-                         continue
-                     yield idx, {
-                         _ARTICLE: article,
-                         _HIGHLIGHTS: highlights,
-                         "id": hash_from_path,
-                     }
-                     idx += 1
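
For reference, the story parsing that the deleted loader performed is easy to reproduce standalone. A toy sketch (not part of the repository) mirroring how `_get_art_abs` split a raw `.story` file into an article and its `@highlight` summary, with the newline-joined target used by versions 2.0.0 and later:

```python
# Toy reproduction of the deleted loader's parsing logic (illustration only).
story = b"""London (CNN) -- A bus broke down on Monday
the driver was unhurt

@highlight

Bus breaks down in London

@highlight

Driver unhurt"""

lines = [line.decode("utf-8").strip() for line in story.splitlines()]

article_lines, highlights = [], []
next_is_highlight = False
for line in lines:
    if not line:
        continue                      # skip empty lines
    elif line.startswith("@highlight"):
        next_is_highlight = True      # lines after this marker are highlights
    elif next_is_highlight:
        highlights.append(line)
    else:
        article_lines.append(line)

article = " ".join(article_lines)
target = "\n".join(highlights)  # versions >= 2.0.0 keep one highlight per line

print(article)
print(target)
```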