Commit 05f41a7 by Yeb Havinga (parent: d68620a)
Fix reading from interleaved files in the case there is one file

Files changed:
- mc4_nl_cleaned.py (+13 -14)
- test_mc4_nl_cleaned.py (+57 -0)
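Context for the one-file case named in the commit message: grouper (visible in the unchanged lines of the hunk below) pads the last group with a fill value, so a configuration that resolves to a single shard file produces a group like (path, None), and the old generator then tried to read from the None entry. The sketch below only illustrates that padding behaviour; it is not part of the commit and the shard name is invented.

# Illustration only: grouper() pads an odd-length list of shard paths with None.
from itertools import zip_longest

def grouper(iterable, n, fillvalue=None):
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)

# Hypothetical shard name, just to show the shape of the groups.
print(list(grouper(["c4-nl-validation.json.gz"], 2, None)))
# -> [('c4-nl-validation.json.gz', None)]  the None entry has to be skipped before opening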
mc4_nl_cleaned.py CHANGED
@@ -195,20 +195,19 @@ class Mc4(datasets.GeneratorBasedBuilder):
         args = [iter(iterable)] * n
         return zip_longest(*args, fillvalue=fillvalue)
 
+    @staticmethod
+    def gzip_open(filepath):
+        if filepath:
+            return gzip.open(open(filepath, "rb"), "rt", encoding="utf-8")
+
     def _generate_examples(self, filepaths):
         """This function returns the examples in the raw (text) form by iterating on all the files."""
         id_ = 0
-        for
-            logger.info(f"Generating examples from {
-        (… 6 removed lines not legible in the diff view)
-                    yield id_, example
-                    id_ += 1
-                if line2:
-                    example = json.loads(line2)
-                    yield id_, example
-                    id_ += 1
+        for files in self.grouper(filepaths, 2, None):
+            logger.info(f"Generating examples from {files}")
+            gzip_iters = [self.gzip_open(file) for file in files if file is not None]
+            for lines in zip(*gzip_iters):
+                for line in lines:
+                    example = json.loads(line)
+                    yield id_, example
+                    id_ += 1
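A self-contained check of the new reading loop, not part of the commit: the shard names and contents below are made up, and grouper/gzip_open are copied from the hunk above with the self parameter dropped. With two shards, zipping the gzip iterators interleaves them line by line; with a single shard, the None padding from grouper is filtered out before opening, which is the case this commit fixes.

import gzip
import json
import os
import tempfile
from itertools import zip_longest

def grouper(iterable, n, fillvalue=None):
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)

def gzip_open(filepath):
    # Mirrors the new helper: padding entries (None) simply return None.
    if filepath:
        return gzip.open(open(filepath, "rb"), "rt", encoding="utf-8")

def write_shard(rows):
    # Write a tiny gzipped JSON-lines shard to a temporary file.
    fd, path = tempfile.mkstemp(suffix=".json.gz")
    os.close(fd)
    with gzip.open(path, "wt", encoding="utf-8") as f:
        for row in rows:
            f.write(json.dumps(row) + "\n")
    return path

def read(filepaths):
    texts = []
    for files in grouper(filepaths, 2, None):
        gzip_iters = [gzip_open(f) for f in files if f is not None]
        for lines in zip(*gzip_iters):
            texts.extend(json.loads(line)["text"] for line in lines)
    return texts

nl = write_shard([{"text": "nl-1"}, {"text": "nl-2"}])
en = write_shard([{"text": "en-1"}, {"text": "en-2"}])

print(read([nl, en]))  # ['nl-1', 'en-1', 'nl-2', 'en-2']  -> pair of files, interleaved
print(read([nl]))      # ['nl-1', 'nl-2']                  -> single file, the fixed case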
test_mc4_nl_cleaned.py ADDED
@@ -0,0 +1,57 @@
from itertools import chain

import pytest
from datasets import load_dataset


def test_streaming_dataset():
    datasets = load_dataset("./mc4_nl_cleaned.py", "tiny", streaming=True)
    assert list(datasets.keys()) == ["train", "validation"]

    train_ds = datasets["train"]
    val_ds = datasets["validation"]

    i = iter(train_ds)
    e = next(i)
    assert len(e["text"]) == 1264
    e = next(i)
    assert len(e["text"]) == 1091

    assert sum(1 for _ in iter(val_ds)) == 16189


def test_batch_dataset():
    datasets = load_dataset(
        "./mc4_nl_cleaned.py",
        "micro",
    )
    assert list(datasets.keys()) == ["train", "validation"]

    train_ds = datasets["train"]
    val_ds = datasets["validation"]

    e = next(iter(train_ds))
    assert len(e["text"]) == 1264

    assert train_ds.num_rows == 125938
    assert val_ds.num_rows == 16189


def test_nl_en():
    datasets = load_dataset(
        "./mc4_nl_cleaned.py",
        "micro_en_nl",
    )
    assert list(datasets.keys()) == ["train", "validation"]

    train_ds = datasets["train"]
    val_ds = datasets["validation"]

    i = iter(train_ds)
    e = next(i)
    assert len(e["text"]) == 1264  ## Length of 'Japanse bedrijven zijn ...'
    e = next(i)
    assert len(e["text"]) == 747  ## Length of 'Beginners BBQ class ...'

    assert train_ds.num_rows == 251900
    assert val_ds.num_rows == 32378
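A possible way to run the new tests, assumed rather than stated in the commit: execute them from the dataset repository root so the relative "./mc4_nl_cleaned.py" path in the tests resolves to the script changed above.

# Assumed invocation, not part of the commit.
import pytest

raise SystemExit(pytest.main(["-q", "test_mc4_nl_cleaned.py"]))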