Make the dataset streamable
Browse files

This way people can do
```python
from datasets import load_dataset
ds = load_dataset("csebuetnlp/xlsum", streaming=True)
for example in ds["train"]:
    ...
```
without downloading the full dataset
xlsum.py
CHANGED
@@ -41,7 +41,7 @@ _HOMEPAGE = "https://github.com/csebuetnlp/xl-sum"
|
|
41 |
|
42 |
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"
|
43 |
|
44 |
-
|
45 |
|
46 |
_LANGUAGES = [
|
47 |
"oromo",
|
@@ -125,39 +125,45 @@ class Xlsum(datasets.GeneratorBasedBuilder):
|
|
125 |
def _split_generators(self, dl_manager):
|
126 |
"""Returns SplitGenerators."""
|
127 |
lang = str(self.config.name)
|
128 |
-
|
129 |
|
130 |
-
|
|
|
131 |
return [
|
132 |
datasets.SplitGenerator(
|
133 |
name=datasets.Split.TRAIN,
|
134 |
gen_kwargs={
|
135 |
-
"filepath":
|
|
|
136 |
},
|
137 |
),
|
138 |
datasets.SplitGenerator(
|
139 |
name=datasets.Split.TEST,
|
140 |
gen_kwargs={
|
141 |
-
"filepath":
|
|
|
142 |
},
|
143 |
),
|
144 |
datasets.SplitGenerator(
|
145 |
name=datasets.Split.VALIDATION,
|
146 |
gen_kwargs={
|
147 |
-
"filepath":
|
|
|
148 |
},
|
149 |
),
|
150 |
]
|
151 |
|
152 |
-
def _generate_examples(self, filepath):
|
153 |
"""Yields examples as (key, example) tuples."""
|
154 |
-
|
155 |
-
|
156 |
-
|
157 |
-
|
158 |
-
|
159 |
-
|
160 |
-
|
161 |
-
|
162 |
-
|
163 |
-
|
|
|
|
|
|
41 |
|
42 |
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"
|
43 |
|
44 |
+
_DATA_PATH = "data/{}_XLSum_v{}.tar.bz2" # relative path to the data
|
45 |
|
46 |
_LANGUAGES = [
|
47 |
"oromo",
|
|
|
125 |
def _split_generators(self, dl_manager):
|
126 |
"""Returns SplitGenerators."""
|
127 |
lang = str(self.config.name)
|
128 |
+
data_path = _DATA_PATH.format(lang, self.VERSION.version_str[:-2])
|
129 |
|
130 |
+
# we download and use dl_manager.iter_archive to be able to load the dataset in streaming mode
|
131 |
+
archive = dl_manager.download(data_path)
|
132 |
return [
|
133 |
datasets.SplitGenerator(
|
134 |
name=datasets.Split.TRAIN,
|
135 |
gen_kwargs={
|
136 |
+
"filepath": lang + "_train.jsonl",
|
137 |
+
"files": dl_manager.iter_archive(archive),
|
138 |
},
|
139 |
),
|
140 |
datasets.SplitGenerator(
|
141 |
name=datasets.Split.TEST,
|
142 |
gen_kwargs={
|
143 |
+
"filepath": lang + "_test.jsonl",
|
144 |
+
"files": dl_manager.iter_archive(archive),
|
145 |
},
|
146 |
),
|
147 |
datasets.SplitGenerator(
|
148 |
name=datasets.Split.VALIDATION,
|
149 |
gen_kwargs={
|
150 |
+
"filepath": lang + "_val.jsonl",
|
151 |
+
"files": dl_manager.iter_archive(archive),
|
152 |
},
|
153 |
),
|
154 |
]
|
155 |
|
156 |
+
def _generate_examples(self, filepath, files):
|
157 |
"""Yields examples as (key, example) tuples."""
|
158 |
+
for path, f in files:
|
159 |
+
if path == filepath:
|
160 |
+
for idx_, row in enumerate(f):
|
161 |
+
data = json.loads(row)
|
162 |
+
yield idx_, {
|
163 |
+
"id": data["id"],
|
164 |
+
"url": data["url"],
|
165 |
+
"title": data["title"],
|
166 |
+
"summary": data["summary"],
|
167 |
+
"text": data["text"],
|
168 |
+
}
|
169 |
+
break
|