Languages: English
Multilinguality: monolingual
Size Categories: 100K<n<1M
Language Creators: found
Annotations Creators: found
Source Datasets: original
Commit f217e92 committed by system (HF staff)
Parent: e26947a

Update files from the datasets library (from 1.16.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
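The 1.16.0 release focused in part on dataset streaming, and the xsum.py changes in this commit (dl_manager.download plus iter_archive in place of download_and_extract) follow that pattern, so the dataset can be read without extracting the archive locally. A usage sketch, assuming datasets >= 1.16.0:

    from datasets import load_dataset

    # Iterate examples on the fly instead of downloading and
    # extracting the whole BBC archive up front.
    ds = load_dataset("xsum", split="train", streaming=True)
    print(next(iter(ds)))  # {"document": ..., "summary": ..., "id": ...}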

Files changed (2)
  1. README.md +5 -0
  2. xsum.py +44 -29
README.md CHANGED
@@ -1,7 +1,12 @@
 ---
+pretty_name: Extreme Summarization (XSum)
 languages:
 - en
 paperswithcode_id: xsum
+task_categories:
+- conditional-text-generation
+task_ids:
+- summarization
 ---
 
 # Dataset Card for "xsum"
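The added front-matter fields feed the Hub's dataset page and search filters. A small sketch of reading them back with huggingface_hub's DatasetCard helper; the commented values are the ones this commit introduces, and the live card may have evolved since:

    from huggingface_hub import DatasetCard

    card = DatasetCard.load("xsum")
    print(card.data.pretty_name)      # "Extreme Summarization (XSum)" per this commit
    print(card.data.task_categories)  # ["conditional-text-generation"] per this commit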
xsum.py CHANGED
@@ -95,7 +95,7 @@ class Xsum(datasets.GeneratorBasedBuilder):
         """Returns SplitGenerators."""
 
         files_to_download = {"data": _URL_DATA, "splits": _URL_SPLITS}
-        downloaded_files = dl_manager.download_and_extract(files_to_download)
+        downloaded_files = dl_manager.download(files_to_download)
 
         return [
             datasets.SplitGenerator(
@@ -103,7 +103,8 @@ class Xsum(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "split_path": downloaded_files["splits"],
                     "split_name": "train",
-                    "data_dir": os.path.join(downloaded_files["data"], "bbc-summary-data"),
+                    "data_dir": "bbc-summary-data",
+                    "files": dl_manager.iter_archive(downloaded_files["data"]),
                 },
             ),
             datasets.SplitGenerator(
@@ -111,7 +112,8 @@ class Xsum(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "split_path": downloaded_files["splits"],
                     "split_name": "validation",
-                    "data_dir": os.path.join(downloaded_files["data"], "bbc-summary-data"),
+                    "data_dir": "bbc-summary-data",
+                    "files": dl_manager.iter_archive(downloaded_files["data"]),
                 },
             ),
             datasets.SplitGenerator(
@@ -119,37 +121,50 @@ class Xsum(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "split_path": downloaded_files["splits"],
                     "split_name": "test",
-                    "data_dir": os.path.join(downloaded_files["data"], "bbc-summary-data"),
+                    "data_dir": "bbc-summary-data",
+                    "files": dl_manager.iter_archive(downloaded_files["data"]),
                 },
             ),
         ]
 
-    def _generate_examples(self, split_path, split_name, data_dir):
+    def _generate_examples(self, split_path, split_name, data_dir, files):
         """Yields examples."""
 
         with open(split_path, "r", encoding="utf-8") as f:
             split_ids = json.load(f)
-
-        for i in split_ids[split_name]:
-            with open(os.path.join(data_dir, i + ".summary"), "r", encoding="utf-8") as f:
-                text = "".join([line for line in f.readlines() if line not in _REMOVE_LINES and line.strip()])
-                # Each file follows below format:
-                # [SN]URL[SN]
-                # http://somelink
-                #
-                # [SN]TITLE[SN]
-                # some intro
-                #
-                # [SN]FIRST-SENTENCE[SN]
-                # some intro
-                #
-                # [SN]RESTBODY[SN]
-                # text line.
-                # another text line.
-                # "another text line."
-
-                # According to the following issue, FIRST-SENTENCE
-                # is the reference summary and TITLE is unused:
-                # https://github.com/EdinburghNLP/XSum/issues/22
-                segs = text.split("[SN]")
-                yield i, {_DOCUMENT: segs[8].strip(), _SUMMARY: segs[6].strip(), _ID: i}
+        split_ids = {k: set(v) for k, v in split_ids.items()}
+
+        for path, f in files:
+            if not split_ids[split_name]:
+                break
+            elif path.startswith(data_dir) and path.endswith(".summary"):
+                i = os.path.basename(path).split(".")[0]
+                if i in split_ids[split_name]:
+                    split_ids[split_name].remove(i)
+                    text = "".join(
+                        [
+                            line.decode("utf-8")
+                            for line in f.readlines()
+                            if line.decode("utf-8") not in _REMOVE_LINES and line.strip()
+                        ]
+                    )
+                    # Each file follows below format:
+                    # [SN]URL[SN]
+                    # http://somelink
+                    #
+                    # [SN]TITLE[SN]
+                    # some intro
+                    #
+                    # [SN]FIRST-SENTENCE[SN]
+                    # some intro
+                    #
+                    # [SN]RESTBODY[SN]
+                    # text line.
+                    # another text line.
+                    # "another text line."
+
+                    # According to the following issue, FIRST-SENTENCE
+                    # is the reference summary and TITLE is unused:
+                    # https://github.com/EdinburghNLP/XSum/issues/22
+                    segs = text.split("[SN]")
+                    yield i, {_DOCUMENT: segs[8].strip(), _SUMMARY: segs[6].strip(), _ID: i}
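The rewritten loader streams the archive instead of extracting it: dl_manager.iter_archive yields (path, file_object) pairs with the file objects opened in binary mode, which is why every line is now decoded from UTF-8. Converting each split's id list to a set makes membership checks cheap and lets the loop break as soon as all ids for the split have been emitted. A minimal sketch of the iter_archive contract against a plain tarball (hypothetical local path; an illustration of the pattern, not the datasets library's implementation):

    import os
    import tarfile

    def iter_archive(archive_path):
        # Mimic dl_manager.iter_archive: yield (path, binary file object)
        # for each regular file in the archive, in archive order.
        with tarfile.open(archive_path) as tar:
            for member in tar:
                if member.isfile():
                    yield member.name, tar.extractfile(member)

    # Hypothetical local copy of the XSum data archive.
    for path, f in iter_archive("xsum-data.tar.gz"):
        if path.startswith("bbc-summary-data") and path.endswith(".summary"):
            article_id = os.path.basename(path).split(".")[0]
            first_line = f.readline().decode("utf-8")  # lines arrive as bytes
            print(article_id, first_line.strip())
            break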