system HF staff committed on
Commit
fea0825
0 Parent(s):

Update files from the datasets library (from 1.0.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0
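For context, the loading script added in this commit is used through the `datasets` library. A minimal usage sketch, assuming the two WikiHow CSV files have already been downloaded manually as described in wikihow.py below (the folder name is only an example):

from datasets import load_dataset

# "all" and "sep" are the two configurations defined in wikihow.py below;
# data_dir must contain wikihowAll.csv (for "all") or wikihowSep.csv (for "sep").
dataset = load_dataset("wikihow", "all", data_dir="~/manual_wikihow_data")
print(dataset["train"][0]["title"])
print(dataset["train"][0]["headline"])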

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
dummy/all/1.2.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d253ae4f927f42dd7d4610fa8b4838d52b17c3d6552b971159fb34d624723ae2
+ size 2840
dummy/sep/1.2.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c9ba09a665e4649d31c26123b1f582598ec848441b378a714eceef7c5300dc6
+ size 1665
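Because *.zip is routed through Git LFS by the .gitattributes above, what the diff shows for the two dummy_data.zip files is the LFS pointer (spec version, SHA-256 object id, size in bytes) rather than the zip content itself. A small sketch of reading those three fields from such a pointer; the parse_lfs_pointer helper is hypothetical, not an official Git LFS tool:

# Parse the three fields of a Git LFS pointer file (version, oid, size).
def parse_lfs_pointer(text):
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].split(":", 1)[1],
        "size": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:d253ae4f927f42dd7d4610fa8b4838d52b17c3d6552b971159fb34d624723ae2
size 2840
"""
print(parse_lfs_pointer(pointer))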
urls_checksums/checksums.txt ADDED
@@ -0,0 +1,3 @@
+ https://raw.githubusercontent.com/mahnazkoupaee/WikiHow-Dataset/master/all_test.txt 182146 c7b3037410bdfbed258cf96dcd5e7d04bc7432b9c20c6e6cd8063b6db84f8f94
+ https://raw.githubusercontent.com/mahnazkoupaee/WikiHow-Dataset/master/all_train.txt 5096074 149bdd47ca9607bc5c730805d01c8fd879ffd7f328fd6869e6109288e8cfe733
+ https://raw.githubusercontent.com/mahnazkoupaee/WikiHow-Dataset/master/all_val.txt 182165 759789579a0a96783f054387d5c2d7b537db75f462629116743f0ac3e7450be4
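Each line of checksums.txt records the download URL, the expected size in bytes, and the SHA-256 digest of one split file. A minimal verification sketch, not part of the datasets library; the local file name all_val.txt is only an assumption for illustration:

import hashlib

expected_size = 182165
expected_sha256 = "759789579a0a96783f054387d5c2d7b537db75f462629116743f0ac3e7450be4"

# Read the downloaded split file and compare size and digest
# against the values recorded above for all_val.txt.
with open("all_val.txt", "rb") as f:
    data = f.read()

assert len(data) == expected_size, "size mismatch"
assert hashlib.sha256(data).hexdigest() == expected_sha256, "sha256 mismatch"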
wikihow.py ADDED
@@ -0,0 +1,204 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """WikiHow Datasets."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import csv
+ import os
+ import re
+
+ import datasets
+
+
+ _CITATION = """
+ @misc{koupaee2018wikihow,
+     title={WikiHow: A Large Scale Text Summarization Dataset},
+     author={Mahnaz Koupaee and William Yang Wang},
+     year={2018},
+     eprint={1810.09305},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ """
+
+ _DESCRIPTION = """
+ WikiHow is a new large-scale dataset using the online WikiHow
+ (http://www.wikihow.com/) knowledge base.
+
+ There are two features:
+   - text: wikihow answers texts.
+   - headline: bold lines as summary.
+
+ There are two separate versions:
+   - all: consisting of the concatenation of all paragraphs as the articles and
+     the bold lines as the reference summaries.
+   - sep: consisting of each paragraph and its summary.
+
+ Download "wikihowAll.csv" and "wikihowSep.csv" from
+ https://github.com/mahnazkoupaee/WikiHow-Dataset and place them in manual folder
+ https://www.tensorflow.org/datasets/api_docs/python/tfds/download/DownloadConfig.
+ Train/validation/test splits are provided by the authors.
+ Preprocessing is applied to remove short articles
+ (abstract length < 0.75 article length) and clean up extra commas.
+ """
+
+ _DOCUMENT = "text"
+ _SUMMARY = "headline"
+
+ _URLS = {
+     "train": "https://raw.githubusercontent.com/mahnazkoupaee/WikiHow-Dataset/master/all_train.txt",
+     "validation": "https://raw.githubusercontent.com/mahnazkoupaee/WikiHow-Dataset/master/all_val.txt",
+     "test": "https://raw.githubusercontent.com/mahnazkoupaee/WikiHow-Dataset/master/all_test.txt",
+ }
+
+
+ class WikihowConfig(datasets.BuilderConfig):
+     """BuilderConfig for Wikihow."""
+
+     def __init__(self, filename=None, **kwargs):
+         """BuilderConfig for Wikihow.
+
+         Args:
+             filename: filename of different configs for the dataset.
+             **kwargs: keyword arguments forwarded to super.
+         """
+         # Version 1.1.0 remove empty document and summary strings.
+         # Version 1.2.0 add train validation test split, add cleaning & filtering.
+         super(WikihowConfig, self).__init__(version=datasets.Version("1.2.0"), **kwargs)
+         self.filename = filename
+
+
+ class Wikihow(datasets.GeneratorBasedBuilder):
+     """WikiHow: A Large Scale Text Summarization Dataset."""
+
+     BUILDER_CONFIGS = [
+         WikihowConfig(
+             name="all",
+             filename="wikihowAll.csv",
+             description="Use the concatenation of all paragraphs as the articles"
+             " and the bold lines as the reference summaries",
+         ),
+         WikihowConfig(name="sep", filename="wikihowSep.csv", description="use each paragraph and its summary."),
+     ]
+
+     @property
+     def manual_download_instructions(self):
+         return """\
+   You need to manually download two wikihow files. An overview of which files to download can be seen at https://github.com/mahnazkoupaee/WikiHow-Dataset.
+   You need to download the following two files manually:
+     1) https://ucsb.app.box.com/s/ap23l8gafpezf4tq3wapr6u8241zz358 and save the file under <path/to/folder>/wikihowAll.csv
+     2) https://ucsb.app.box.com/s/7yq601ijl1lzvlfu4rjdbbxforzd2oag and save the file under <path/to/folder>/wikihowSep.csv
+
+   The <path/to/folder> can e.g. be "~/manual_wikihow_data".
+
+   Wikihow can then be loaded using the following command `datasets.load_dataset("wikihow", data_dir="<path/to/folder>")`.
+   """
+
+     def _info(self):
+         feature_names = [_DOCUMENT, _SUMMARY, "title"]
+         if self.config.name == "sep":
+             feature_names.extend(["overview", "sectionLabel"])
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features({k: datasets.Value("string") for k in feature_names}),
+             supervised_keys=None,
+             homepage="https://github.com/mahnazkoupaee/WikiHow-Dataset",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         dl_path = dl_manager.download_and_extract(_URLS)
+         titles = {k: set() for k in dl_path}
+         for k, path in dl_path.items():
+             with open(path, encoding="utf-8") as f:
+                 for line in f:
+                     titles[k].add(line.strip())
+
+         path_to_manual_file = os.path.join(
+             os.path.abspath(os.path.expanduser(dl_manager.manual_dir)), self.config.filename
+         )
+
+         if not os.path.exists(path_to_manual_file):
+             raise FileNotFoundError(
+                 "{} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('wikihow', data_dir=...)` that includes a file name {}. Manual download instructions: {})".format(
+                     path_to_manual_file, self.config.filename, self.manual_download_instructions
+                 )
+             )
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "path": path_to_manual_file,
+                     "title_set": titles["train"],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "path": path_to_manual_file,
+                     "title_set": titles["validation"],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "path": path_to_manual_file,
+                     "title_set": titles["test"],
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, path=None, title_set=None):
+         """Yields examples."""
+         with open(path, encoding="utf-8") as f:
+             reader = csv.reader(f)
+             headers = next(reader)
+             if self.config.name == "all" and headers != ["headline", "title", "text"]:
+                 raise ValueError("Mismatched header in wikihowAll.csv")
+             if self.config.name == "sep" and headers != ["overview", "headline", "text", "sectionLabel", "title"]:
+                 raise ValueError("Mismatched header in wikihowSep.csv")
+             key2id = {key: i for i, key in enumerate(headers)}
+             for i, line in enumerate(reader):
+                 # skip empty lines or lines with missing fields.
+                 if len(line) == len(key2id):
+                     summary = line[key2id[_SUMMARY]].strip()
+                     document = line[key2id[_DOCUMENT]].strip()
+                     summary, document = _filter_and_clean(summary, document)
+                     if summary and document:
+                         if line[key2id["title"]].strip().replace(" ", "") in title_set:
+                             d = {k: line[v].strip() for k, v in key2id.items() if k not in [_SUMMARY, _DOCUMENT]}
+                             d[_DOCUMENT] = document
+                             d[_SUMMARY] = summary
+                             yield i, d
+
+
+ # This function follows the data processing of the original paper at
+ # https://github.com/mahnazkoupaee/WikiHow-Dataset/blob/master/process.py
+ def _filter_and_clean(abstract, article):
+     """Remove short articles and clean up commas in abstract and article."""
+     # a threshold is used to remove short articles with long summaries
+     # as well as articles with no summary
+     if len(abstract) < (0.75 * len(article)):
+         # remove extra commas in abstracts
+         abstract = abstract.replace(".,", ".")
+         # remove extra commas in articles
+         article = re.sub(r"[.]+[\n]+[,]", ".\n", article)
+         return abstract, article
+     else:
+         return "", ""
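To make the filtering rule concrete, here is a toy illustration of _filter_and_clean with made-up inputs; it assumes the function from the listing above is in scope:

# Kept: the abstract is shorter than 75% of the article, so the pair is
# returned with the extra commas cleaned up.
abstract = "Boil the water.,\nAdd the pasta.,"
article = (
    "Fill a large pot with water and bring it to a boil.\n"
    ",Then add the pasta and stir occasionally until it is cooked through."
)
print(_filter_and_clean(abstract, article))

# Dropped: the abstract is at least 75% of the article length, so ("", "")
# is returned and _generate_examples skips the row.
print(_filter_and_clean("a very long abstract", "short article"))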